1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121
122 /* Is BE in a multi-channel mode */
123 static inline bool be_is_mc(struct be_adapter *adapter) {
124         return (adapter->function_mode & FLEX10_MODE ||
125                 adapter->function_mode & VNIC_MODE ||
126                 adapter->function_mode & UMC_ENABLED);
127 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL);
150         if (!mem->va)
151                 return -ENOMEM;
152         memset(mem->va, 0, mem->size);
153         return 0;
154 }
155
156 static void be_intr_set(struct be_adapter *adapter, bool enable)
157 {
158         u32 reg, enabled;
159
160         if (adapter->eeh_error)
161                 return;
162
163         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164                                 &reg);
165         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
166
167         if (!enabled && enable)
168                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169         else if (enabled && !enable)
170                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
171         else
172                 return;
173
174         pci_write_config_dword(adapter->pdev,
175                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176 }
177
178 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180         u32 val = 0;
181         val |= qid & DB_RQ_RING_ID_MASK;
182         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
183
184         wmb();
185         iowrite32(val, adapter->db + DB_RQ_OFFSET);
186 }
187
188 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
189 {
190         u32 val = 0;
191         val |= qid & DB_TXULP_RING_ID_MASK;
192         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
193
194         wmb();
195         iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
196 }
197
198 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
199                 bool arm, bool clear_int, u16 num_popped)
200 {
201         u32 val = 0;
202         val |= qid & DB_EQ_RING_ID_MASK;
203         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
204                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
205
206         if (adapter->eeh_error)
207                 return;
208
209         if (arm)
210                 val |= 1 << DB_EQ_REARM_SHIFT;
211         if (clear_int)
212                 val |= 1 << DB_EQ_CLR_SHIFT;
213         val |= 1 << DB_EQ_EVNT_SHIFT;
214         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
215         iowrite32(val, adapter->db + DB_EQ_OFFSET);
216 }
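/* (A note on the doorbell word built above: the EQ id sits in the low
 * DB_EQ_RING_ID_MASK bits plus the extension field, and the rearm,
 * clear-interrupt and event bits along with the popped-entry count are
 * OR'ed in before the single 32-bit write to the doorbell BAR.)
 */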
217
218 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
219 {
220         u32 val = 0;
221         val |= qid & DB_CQ_RING_ID_MASK;
222         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
223                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
224
225         if (adapter->eeh_error)
226                 return;
227
228         if (arm)
229                 val |= 1 << DB_CQ_REARM_SHIFT;
230         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
231         iowrite32(val, adapter->db + DB_CQ_OFFSET);
232 }
233
234 static int be_mac_addr_set(struct net_device *netdev, void *p)
235 {
236         struct be_adapter *adapter = netdev_priv(netdev);
237         struct sockaddr *addr = p;
238         int status = 0;
239         u8 current_mac[ETH_ALEN];
240         u32 pmac_id = adapter->pmac_id[0];
241         bool active_mac = true;
242
243         if (!is_valid_ether_addr(addr->sa_data))
244                 return -EADDRNOTAVAIL;
245
246         /* For a BE VF, the MAC address is already activated by the PF.
247          * Hence the only operation left is updating netdev->dev_addr.
248          * Update it only if the user passes the same MAC that was used
249          * while configuring the VF MAC from the PF (hypervisor).
250          */
251         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252                 status = be_cmd_mac_addr_query(adapter, current_mac,
253                                                false, adapter->if_handle, 0);
254                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255                         goto done;
256                 else
257                         goto err;
258         }
259
260         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261                 goto done;
262
263         /* For Lancer check if any MAC is active.
264          * If active, get its mac id.
265          */
266         if (lancer_chip(adapter) && !be_physfn(adapter))
267                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268                                          &pmac_id, 0);
269
270         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271                                  adapter->if_handle,
272                                  &adapter->pmac_id[0], 0);
273
274         if (status)
275                 goto err;
276
277         if (active_mac)
278                 be_cmd_pmac_del(adapter, adapter->if_handle,
279                                 pmac_id, 0);
280 done:
281         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
282         return 0;
283 err:
284         dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
285         return status;
286 }
287
288 /* BE2 supports only v0 cmd */
289 static void *hw_stats_from_cmd(struct be_adapter *adapter)
290 {
291         if (BE2_chip(adapter)) {
292                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294                 return &cmd->hw_stats;
295         } else  {
296                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298                 return &cmd->hw_stats;
299         }
300 }
301
302 /* BE2 supports only v0 cmd */
303 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304 {
305         if (BE2_chip(adapter)) {
306                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308                 return &hw_stats->erx;
309         } else {
310                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312                 return &hw_stats->erx;
313         }
314 }
315
316 static void populate_be_v0_stats(struct be_adapter *adapter)
317 {
318         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
319         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
320         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
321         struct be_port_rxf_stats_v0 *port_stats =
322                                         &rxf_stats->port[adapter->port_num];
323         struct be_drv_stats *drvs = &adapter->drv_stats;
324
325         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
326         drvs->rx_pause_frames = port_stats->rx_pause_frames;
327         drvs->rx_crc_errors = port_stats->rx_crc_errors;
328         drvs->rx_control_frames = port_stats->rx_control_frames;
329         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
330         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
331         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
332         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
333         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
334         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
335         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
336         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
337         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
338         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
339         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
340         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
341         drvs->rx_dropped_header_too_small =
342                 port_stats->rx_dropped_header_too_small;
343         drvs->rx_address_mismatch_drops =
344                                         port_stats->rx_address_mismatch_drops +
345                                         port_stats->rx_vlan_mismatch_drops;
346         drvs->rx_alignment_symbol_errors =
347                 port_stats->rx_alignment_symbol_errors;
348
349         drvs->tx_pauseframes = port_stats->tx_pauseframes;
350         drvs->tx_controlframes = port_stats->tx_controlframes;
351
352         if (adapter->port_num)
353                 drvs->jabber_events = rxf_stats->port1_jabber_events;
354         else
355                 drvs->jabber_events = rxf_stats->port0_jabber_events;
356         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
357         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
358         drvs->forwarded_packets = rxf_stats->forwarded_packets;
359         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
360         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
361         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
362         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
363 }
364
365 static void populate_be_v1_stats(struct be_adapter *adapter)
366 {
367         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
368         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
369         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
370         struct be_port_rxf_stats_v1 *port_stats =
371                                         &rxf_stats->port[adapter->port_num];
372         struct be_drv_stats *drvs = &adapter->drv_stats;
373
374         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
375         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
376         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
377         drvs->rx_pause_frames = port_stats->rx_pause_frames;
378         drvs->rx_crc_errors = port_stats->rx_crc_errors;
379         drvs->rx_control_frames = port_stats->rx_control_frames;
380         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
381         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
382         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
383         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
384         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
385         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
386         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
387         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
388         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
389         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
390         drvs->rx_dropped_header_too_small =
391                 port_stats->rx_dropped_header_too_small;
392         drvs->rx_input_fifo_overflow_drop =
393                 port_stats->rx_input_fifo_overflow_drop;
394         drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
395         drvs->rx_alignment_symbol_errors =
396                 port_stats->rx_alignment_symbol_errors;
397         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
398         drvs->tx_pauseframes = port_stats->tx_pauseframes;
399         drvs->tx_controlframes = port_stats->tx_controlframes;
400         drvs->jabber_events = port_stats->jabber_events;
401         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
402         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
403         drvs->forwarded_packets = rxf_stats->forwarded_packets;
404         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
405         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
406         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
407         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
408 }
409
410 static void populate_lancer_stats(struct be_adapter *adapter)
411 {
413         struct be_drv_stats *drvs = &adapter->drv_stats;
414         struct lancer_pport_stats *pport_stats =
415                                         pport_stats_from_cmd(adapter);
416
417         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
418         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
419         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
420         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
421         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
422         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
423         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
424         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
425         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
426         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
427         drvs->rx_dropped_tcp_length =
428                                 pport_stats->rx_dropped_invalid_tcp_length;
429         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
430         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
431         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
432         drvs->rx_dropped_header_too_small =
433                                 pport_stats->rx_dropped_header_too_small;
434         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
435         drvs->rx_address_mismatch_drops =
436                                         pport_stats->rx_address_mismatch_drops +
437                                         pport_stats->rx_vlan_mismatch_drops;
438         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
439         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
440         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
441         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
442         drvs->jabber_events = pport_stats->rx_jabbers;
443         drvs->forwarded_packets = pport_stats->num_forwards_lo;
444         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
445         drvs->rx_drops_too_many_frags =
446                                 pport_stats->rx_drops_too_many_frags_lo;
447 }
448
449 static void accumulate_16bit_val(u32 *acc, u16 val)
450 {
451 #define lo(x)                   ((x) & 0xFFFF)
452 #define hi(x)                   ((x) & 0xFFFF0000)
453         bool wrapped = val < lo(*acc);
454         u32 newacc = hi(*acc) + val;
455
456         if (wrapped)
457                 newacc += 65536;
458         ACCESS_ONCE(*acc) = newacc;
459 }
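/* Worked example (illustrative): if *acc is 0x0001FFFE, then lo(*acc) is
 * 0xFFFE; a new HW reading of 2 means the 16-bit counter wrapped, so
 * newacc = 0x00010000 + 2 + 65536 = 0x00020002 -- exactly the 4 events
 * between 0xFFFE and 2 get added to the running 32-bit total.
 */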
460
461 void be_parse_stats(struct be_adapter *adapter)
462 {
463         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
464         struct be_rx_obj *rxo;
465         int i;
466
467         if (lancer_chip(adapter)) {
468                 populate_lancer_stats(adapter);
469         } else {
470                 if (BE2_chip(adapter))
471                         populate_be_v0_stats(adapter);
472                 else
473                         /* for BE3 and Skyhawk */
474                         populate_be_v1_stats(adapter);
475
476                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
477                 for_all_rx_queues(adapter, rxo, i) {
478                         /* the erx HW counter below can wrap around after
479                          * 65535; the driver accumulates it into a 32-bit value
480                          */
481                         accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
482                                              (u16)erx->rx_drops_no_fragments
483                                              [rxo->q.id]);
484                 }
485         }
486 }
487
488 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
489                                         struct rtnl_link_stats64 *stats)
490 {
491         struct be_adapter *adapter = netdev_priv(netdev);
492         struct be_drv_stats *drvs = &adapter->drv_stats;
493         struct be_rx_obj *rxo;
494         struct be_tx_obj *txo;
495         u64 pkts, bytes;
496         unsigned int start;
497         int i;
498
499         for_all_rx_queues(adapter, rxo, i) {
500                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
501                 do {
502                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
503                         pkts = rx_stats(rxo)->rx_pkts;
504                         bytes = rx_stats(rxo)->rx_bytes;
505                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
506                 stats->rx_packets += pkts;
507                 stats->rx_bytes += bytes;
508                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
509                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
510                                         rx_stats(rxo)->rx_drops_no_frags;
511         }
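        /* (the u64_stats fetch_begin/retry pairs used here and in the tx
         * loop below re-read until a consistent 64-bit snapshot is seen;
         * on 64-bit builds the seqcount is compiled out)
         */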
512
513         for_all_tx_queues(adapter, txo, i) {
514                 const struct be_tx_stats *tx_stats = tx_stats(txo);
515                 do {
516                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
517                         pkts = tx_stats(txo)->tx_pkts;
518                         bytes = tx_stats(txo)->tx_bytes;
519                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
520                 stats->tx_packets += pkts;
521                 stats->tx_bytes += bytes;
522         }
523
524         /* bad pkts received */
525         stats->rx_errors = drvs->rx_crc_errors +
526                 drvs->rx_alignment_symbol_errors +
527                 drvs->rx_in_range_errors +
528                 drvs->rx_out_range_errors +
529                 drvs->rx_frame_too_long +
530                 drvs->rx_dropped_too_small +
531                 drvs->rx_dropped_too_short +
532                 drvs->rx_dropped_header_too_small +
533                 drvs->rx_dropped_tcp_length +
534                 drvs->rx_dropped_runt;
535
536         /* detailed rx errors */
537         stats->rx_length_errors = drvs->rx_in_range_errors +
538                 drvs->rx_out_range_errors +
539                 drvs->rx_frame_too_long;
540
541         stats->rx_crc_errors = drvs->rx_crc_errors;
542
543         /* frame alignment errors */
544         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
545
546         /* receiver fifo overrun */
547         /* drops_no_pbuf is not per interface; it's per BE card */
548         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
549                                 drvs->rx_input_fifo_overflow_drop +
550                                 drvs->rx_drops_no_pbuf;
551         return stats;
552 }
553
554 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
555 {
556         struct net_device *netdev = adapter->netdev;
557
558         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
559                 netif_carrier_off(netdev);
560                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
561         }
562
563         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
564                 netif_carrier_on(netdev);
565         else
566                 netif_carrier_off(netdev);
567 }
568
569 static void be_tx_stats_update(struct be_tx_obj *txo,
570                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
571 {
572         struct be_tx_stats *stats = tx_stats(txo);
573
574         u64_stats_update_begin(&stats->sync);
575         stats->tx_reqs++;
576         stats->tx_wrbs += wrb_cnt;
577         stats->tx_bytes += copied;
578         stats->tx_pkts += (gso_segs ? gso_segs : 1);
579         if (stopped)
580                 stats->tx_stops++;
581         u64_stats_update_end(&stats->sync);
582 }
583
584 /* Determine number of WRB entries needed to xmit data in an skb */
585 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586                                                                 bool *dummy)
587 {
588         int cnt = (skb->len > skb->data_len);
589
590         cnt += skb_shinfo(skb)->nr_frags;
591
592         /* to account for hdr wrb */
593         cnt++;
594         if (lancer_chip(adapter) || !(cnt & 1)) {
595                 *dummy = false;
596         } else {
597                 /* add a dummy to make it an even num */
598                 cnt++;
599                 *dummy = true;
600         }
601         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602         return cnt;
603 }
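/* Example: an skb with a linear area and one page frag needs
 * 1 + 1 data WRBs + 1 header WRB = 3; on non-Lancer chips a dummy WRB
 * is added to round this up to 4, since those chips want an even count.
 */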
604
605 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
606 {
607         wrb->frag_pa_hi = upper_32_bits(addr);
608         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
609         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
610         wrb->rsvd0 = 0;
611 }
612
613 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614                                         struct sk_buff *skb)
615 {
616         u8 vlan_prio;
617         u16 vlan_tag;
618
619         vlan_tag = vlan_tx_tag_get(skb);
620         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621         /* If vlan priority provided by OS is NOT in available bmap */
622         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624                                 adapter->recommended_prio;
625
626         return vlan_tag;
627 }
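/* (The 802.1Q TCI keeps the 3-bit priority (PCP) in bits 15:13 --
 * VLAN_PRIO_MASK/VLAN_PRIO_SHIFT above -- so when the OS-chosen priority
 * is not in vlan_prio_bmap, only those three bits are rewritten with
 * adapter->recommended_prio.)
 */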
628
629 static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630 {
631         return vlan_tx_tag_present(skb) || adapter->pvid;
632 }
633
634 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
635                 struct sk_buff *skb, u32 wrb_cnt, u32 len)
636 {
637         u16 vlan_tag;
638
639         memset(hdr, 0, sizeof(*hdr));
640
641         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
642
643         if (skb_is_gso(skb)) {
644                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
645                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
646                         hdr, skb_shinfo(skb)->gso_size);
647                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
648                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
649         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
650                 if (is_tcp_pkt(skb))
651                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
652                 else if (is_udp_pkt(skb))
653                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
654         }
655
656         if (vlan_tx_tag_present(skb)) {
657                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
658                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
659                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
660         }
661
662         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
663         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
664         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
665         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
666 }
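/* (num_wrb as programmed above counts the header WRB itself plus all
 * data and dummy WRBs, while 'len' carries only the payload byte count
 * handed in as 'copied' by the caller.)
 */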
667
668 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
669                 bool unmap_single)
670 {
671         dma_addr_t dma;
672
673         be_dws_le_to_cpu(wrb, sizeof(*wrb));
674
675         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
676         if (wrb->frag_len) {
677                 if (unmap_single)
678                         dma_unmap_single(dev, dma, wrb->frag_len,
679                                          DMA_TO_DEVICE);
680                 else
681                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
682         }
683 }
684
685 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
686                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
687 {
688         dma_addr_t busaddr;
689         int i, copied = 0;
690         struct device *dev = &adapter->pdev->dev;
691         struct sk_buff *first_skb = skb;
692         struct be_eth_wrb *wrb;
693         struct be_eth_hdr_wrb *hdr;
694         bool map_single = false;
695         u16 map_head;
696
697         hdr = queue_head_node(txq);
698         queue_head_inc(txq);
699         map_head = txq->head;
700
701         if (skb->len > skb->data_len) {
702                 int len = skb_headlen(skb);
703                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
704                 if (dma_mapping_error(dev, busaddr))
705                         goto dma_err;
706                 map_single = true;
707                 wrb = queue_head_node(txq);
708                 wrb_fill(wrb, busaddr, len);
709                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
710                 queue_head_inc(txq);
711                 copied += len;
712         }
713
714         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
715                 const struct skb_frag_struct *frag =
716                         &skb_shinfo(skb)->frags[i];
717                 busaddr = skb_frag_dma_map(dev, frag, 0,
718                                            skb_frag_size(frag), DMA_TO_DEVICE);
719                 if (dma_mapping_error(dev, busaddr))
720                         goto dma_err;
721                 wrb = queue_head_node(txq);
722                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
723                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
724                 queue_head_inc(txq);
725                 copied += skb_frag_size(frag);
726         }
727
728         if (dummy_wrb) {
729                 wrb = queue_head_node(txq);
730                 wrb_fill(wrb, 0, 0);
731                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732                 queue_head_inc(txq);
733         }
734
735         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
736         be_dws_cpu_to_le(hdr, sizeof(*hdr));
737
738         return copied;
739 dma_err:
740         txq->head = map_head;
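        /* unwind: only the head buffer (if any) was mapped with
         * dma_map_single(); every later fragment came from
         * skb_frag_dma_map(), so map_single is cleared after the first
         * unmap below
         */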
741         while (copied) {
742                 wrb = queue_head_node(txq);
743                 unmap_tx_frag(dev, wrb, map_single);
744                 map_single = false;
745                 copied -= wrb->frag_len;
746                 queue_head_inc(txq);
747         }
748         return 0;
749 }
750
751 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
752                                              struct sk_buff *skb)
753 {
754         u16 vlan_tag = 0;
755
756         skb = skb_share_check(skb, GFP_ATOMIC);
757         if (unlikely(!skb))
758                 return skb;
759
760         if (vlan_tx_tag_present(skb)) {
761                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762                 __vlan_put_tag(skb, vlan_tag);
763                 skb->vlan_tci = 0;
764         }
765
766         return skb;
767 }
768
769 static netdev_tx_t be_xmit(struct sk_buff *skb,
770                         struct net_device *netdev)
771 {
772         struct be_adapter *adapter = netdev_priv(netdev);
773         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
774         struct be_queue_info *txq = &txo->q;
775         struct iphdr *ip = NULL;
776         u32 wrb_cnt = 0, copied = 0;
777         u32 start = txq->head, eth_hdr_len;
778         bool dummy_wrb, stopped = false;
779
780         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
781                 VLAN_ETH_HLEN : ETH_HLEN;
782
783         /* HW has a bug that treats padding bytes as legal payload
784          * and updates the IPv4 hdr's 'tot_len' field to include them
785          */
786         if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
787                         is_ipv4_pkt(skb)) {
788                 ip = ip_hdr(skb);
789                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
790         }
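        /* (with the trim above, e.g. a 54-byte TCP ACK that the stack
         * padded out to 60 bytes is cut back to
         * eth_hdr_len + ntohs(ip->tot_len) = 14 + 40 = 54 bytes, so the
         * buggy HW never sees the pad bytes)
         */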
791
792         /* HW has a bug wherein it calculates the CSUM for VLAN
793          * pkts even when checksum offload is disabled.
794          * Manually insert the VLAN tag in the pkt instead.
795          */
796         if (skb->ip_summed != CHECKSUM_PARTIAL &&
797                         be_vlan_tag_chk(adapter, skb)) {
798                 skb = be_insert_vlan_in_pkt(adapter, skb);
799                 if (unlikely(!skb))
800                         goto tx_drop;
801         }
802
803         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
804
805         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
806         if (copied) {
807                 int gso_segs = skb_shinfo(skb)->gso_segs;
808
809                 /* record the sent skb in the sent_skb table */
810                 BUG_ON(txo->sent_skb_list[start]);
811                 txo->sent_skb_list[start] = skb;
812
813                 /* Ensure txq has space for the next skb; else stop the queue
814                  * *BEFORE* ringing the tx doorbell, so that we serialize the
815                  * tx compls of the current transmit, which will wake up the queue
816                  */
817                 atomic_add(wrb_cnt, &txq->used);
818                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
819                                                                 txq->len) {
820                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
821                         stopped = true;
822                 }
823
824                 be_txq_notify(adapter, txq->id, wrb_cnt);
825
826                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
827         } else {
828                 txq->head = start;
829                 dev_kfree_skb_any(skb);
830         }
831 tx_drop:
832         return NETDEV_TX_OK;
833 }
834
835 static int be_change_mtu(struct net_device *netdev, int new_mtu)
836 {
837         struct be_adapter *adapter = netdev_priv(netdev);
838         if (new_mtu < BE_MIN_MTU ||
839                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
840                                         (ETH_HLEN + ETH_FCS_LEN))) {
841                 dev_info(&adapter->pdev->dev,
842                         "MTU must be between %d and %d bytes\n",
843                         BE_MIN_MTU,
844                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
845                 return -EINVAL;
846         }
847         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
848                         netdev->mtu, new_mtu);
849         netdev->mtu = new_mtu;
850         return 0;
851 }
852
853 /*
854  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
855  * If the user configures more, place BE in vlan promiscuous mode.
856  */
857 static int be_vid_config(struct be_adapter *adapter)
858 {
859         u16 vids[BE_NUM_VLANS_SUPPORTED];
860         u16 num = 0, i;
861         int status = 0;
862
863         /* No need to further configure vids if in promiscuous mode */
864         if (adapter->promiscuous)
865                 return 0;
866
867         if (adapter->vlans_added > adapter->max_vlans)
868                 goto set_vlan_promisc;
869
870         /* Construct VLAN Table to give to HW */
871         for (i = 0; i < VLAN_N_VID; i++)
872                 if (adapter->vlan_tag[i])
873                         vids[num++] = cpu_to_le16(i);
874
875         status = be_cmd_vlan_config(adapter, adapter->if_handle,
876                                     vids, num, 1, 0);
877
878         /* Set to VLAN promisc mode as setting VLAN filter failed */
879         if (status) {
880                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
881                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
882                 goto set_vlan_promisc;
883         }
884
885         return status;
886
887 set_vlan_promisc:
888         status = be_cmd_vlan_config(adapter, adapter->if_handle,
889                                     NULL, 0, 1, 1);
890         return status;
891 }
892
893 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
894 {
895         struct be_adapter *adapter = netdev_priv(netdev);
896         int status = 0;
897
898         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
899                 status = -EINVAL;
900                 goto ret;
901         }
902
903         /* Packets with VID 0 are always received by Lancer by default */
904         if (lancer_chip(adapter) && vid == 0)
905                 goto ret;
906
907         adapter->vlan_tag[vid] = 1;
908         if (adapter->vlans_added <= (adapter->max_vlans + 1))
909                 status = be_vid_config(adapter);
910
911         if (!status)
912                 adapter->vlans_added++;
913         else
914                 adapter->vlan_tag[vid] = 0;
915 ret:
916         return status;
917 }
918
919 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
920 {
921         struct be_adapter *adapter = netdev_priv(netdev);
922         int status = 0;
923
924         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
925                 status = -EINVAL;
926                 goto ret;
927         }
928
929         /* Packets with VID 0 are always received by Lancer by default */
930         if (lancer_chip(adapter) && vid == 0)
931                 goto ret;
932
933         adapter->vlan_tag[vid] = 0;
934         if (adapter->vlans_added <= adapter->max_vlans)
935                 status = be_vid_config(adapter);
936
937         if (!status)
938                 adapter->vlans_added--;
939         else
940                 adapter->vlan_tag[vid] = 1;
941 ret:
942         return status;
943 }
944
945 static void be_set_rx_mode(struct net_device *netdev)
946 {
947         struct be_adapter *adapter = netdev_priv(netdev);
948         int status;
949
950         if (netdev->flags & IFF_PROMISC) {
951                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
952                 adapter->promiscuous = true;
953                 goto done;
954         }
955
956         /* BE was previously in promiscuous mode; disable it */
957         if (adapter->promiscuous) {
958                 adapter->promiscuous = false;
959                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
960
961                 if (adapter->vlans_added)
962                         be_vid_config(adapter);
963         }
964
965         /* Enable multicast promisc if num configured exceeds what we support */
966         if (netdev->flags & IFF_ALLMULTI ||
967             netdev_mc_count(netdev) > adapter->max_mcast_mac) {
968                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
969                 goto done;
970         }
971
972         if (netdev_uc_count(netdev) != adapter->uc_macs) {
973                 struct netdev_hw_addr *ha;
974                 int i = 1; /* First slot is claimed by the Primary MAC */
975
976                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
977                         be_cmd_pmac_del(adapter, adapter->if_handle,
978                                         adapter->pmac_id[i], 0);
979                 }
980
981                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
982                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
983                         adapter->promiscuous = true;
984                         goto done;
985                 }
986
987                 netdev_for_each_uc_addr(ha, adapter->netdev) {
988                         adapter->uc_macs++; /* First slot is for Primary MAC */
989                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
990                                         adapter->if_handle,
991                                         &adapter->pmac_id[adapter->uc_macs], 0);
992                 }
993         }
994
995         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
996
997         /* Set to MCAST promisc mode if setting MULTICAST address fails */
998         if (status) {
999                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1000                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1001                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1002         }
1003 done:
1004         return;
1005 }
1006
1007 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1008 {
1009         struct be_adapter *adapter = netdev_priv(netdev);
1010         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1011         int status;
1012         bool active_mac = false;
1013         u32 pmac_id;
1014         u8 old_mac[ETH_ALEN];
1015
1016         if (!sriov_enabled(adapter))
1017                 return -EPERM;
1018
1019         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1020                 return -EINVAL;
1021
1022         if (lancer_chip(adapter)) {
1023                 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1024                                                   &pmac_id, vf + 1);
1025                 if (!status && active_mac)
1026                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1027                                         pmac_id, vf + 1);
1028
1029                 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1030         } else {
1031                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1032                                          vf_cfg->pmac_id, vf + 1);
1033
1034                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1035                                          &vf_cfg->pmac_id, vf + 1);
1036         }
1037
1038         if (status)
1039                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
1040                                 mac, vf);
1041         else
1042                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1043
1044         return status;
1045 }
1046
1047 static int be_get_vf_config(struct net_device *netdev, int vf,
1048                         struct ifla_vf_info *vi)
1049 {
1050         struct be_adapter *adapter = netdev_priv(netdev);
1051         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1052
1053         if (!sriov_enabled(adapter))
1054                 return -EPERM;
1055
1056         if (vf >= adapter->num_vfs)
1057                 return -EINVAL;
1058
1059         vi->vf = vf;
1060         vi->tx_rate = vf_cfg->tx_rate;
1061         vi->vlan = vf_cfg->vlan_tag;
1062         vi->qos = 0;
1063         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1064
1065         return 0;
1066 }
1067
1068 static int be_set_vf_vlan(struct net_device *netdev,
1069                         int vf, u16 vlan, u8 qos)
1070 {
1071         struct be_adapter *adapter = netdev_priv(netdev);
1072         int status = 0;
1073
1074         if (!sriov_enabled(adapter))
1075                 return -EPERM;
1076
1077         if (vf >= adapter->num_vfs || vlan > 4095)
1078                 return -EINVAL;
1079
1080         if (vlan) {
1081                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1082                         /* If this is a new value, program it; else skip. */
1083                         adapter->vf_cfg[vf].vlan_tag = vlan;
1084
1085                         status = be_cmd_set_hsw_config(adapter, vlan,
1086                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1087                 }
1088         } else {
1089                 /* Reset Transparent Vlan Tagging. */
1090                 adapter->vf_cfg[vf].vlan_tag = 0;
1091                 vlan = adapter->vf_cfg[vf].def_vid;
1092                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1093                         adapter->vf_cfg[vf].if_handle);
1094         }
1095
1096
1097         if (status)
1098                 dev_info(&adapter->pdev->dev,
1099                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1100         return status;
1101 }
1102
1103 static int be_set_vf_tx_rate(struct net_device *netdev,
1104                         int vf, int rate)
1105 {
1106         struct be_adapter *adapter = netdev_priv(netdev);
1107         int status = 0;
1108
1109         if (!sriov_enabled(adapter))
1110                 return -EPERM;
1111
1112         if (vf >= adapter->num_vfs)
1113                 return -EINVAL;
1114
1115         if (rate < 100 || rate > 10000) {
1116                 dev_err(&adapter->pdev->dev,
1117                         "tx rate must be between 100 and 10000 Mbps\n");
1118                 return -EINVAL;
1119         }
1120
1121         if (lancer_chip(adapter))
1122                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123         else
1124                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1125
1126         if (status)
1127                 dev_err(&adapter->pdev->dev,
1128                                 "tx rate %d on VF %d failed\n", rate, vf);
1129         else
1130                 adapter->vf_cfg[vf].tx_rate = rate;
1131         return status;
1132 }
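/* (the rate / 10 conversion above suggests the FW expects the rate in
 * 10 Mbps units, mapping the accepted 100..10000 Mbps range onto
 * 10..1000 -- an inference from this call site, not a documented fact)
 */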
1133
1134 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1135 {
1136         struct pci_dev *dev, *pdev = adapter->pdev;
1137         int vfs = 0, assigned_vfs = 0, pos;
1139
1140         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1141         if (!pos)
1142                 return 0;
1145
1146         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1147         while (dev) {
1148                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1149                         vfs++;
1150                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1151                                 assigned_vfs++;
1152                 }
1153                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1154         }
1155         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1156 }
1157
1158 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1159 {
1160         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1161         ulong now = jiffies;
1162         ulong delta = now - stats->rx_jiffies;
1163         u64 pkts;
1164         unsigned int start, eqd;
1165
1166         if (!eqo->enable_aic) {
1167                 eqd = eqo->eqd;
1168                 goto modify_eqd;
1169         }
1170
1171         if (eqo->idx >= adapter->num_rx_qs)
1172                 return;
1173
1174         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1175
1176         /* Wrapped around */
1177         if (time_before(now, stats->rx_jiffies)) {
1178                 stats->rx_jiffies = now;
1179                 return;
1180         }
1181
1182         /* Update once a second */
1183         if (delta < HZ)
1184                 return;
1185
1186         do {
1187                 start = u64_stats_fetch_begin_bh(&stats->sync);
1188                 pkts = stats->rx_pkts;
1189         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1190
1191         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1192         stats->rx_pkts_prev = pkts;
1193         stats->rx_jiffies = now;
1194         eqd = (stats->rx_pps / 110000) << 3;
1195         eqd = min(eqd, eqo->max_eqd);
1196         eqd = max(eqd, eqo->min_eqd);
1197         if (eqd < 10)
1198                 eqd = 0;
1199
1200 modify_eqd:
1201         if (eqd != eqo->cur_eqd) {
1202                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1203                 eqo->cur_eqd = eqd;
1204         }
1205 }
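/* (the adaptive path above scales the EQ delay roughly linearly with
 * load: e.g. ~440K pkts/s gives eqd = (440000 / 110000) << 3 = 32,
 * which is then clamped to [min_eqd, max_eqd]; anything under 10 is
 * treated as "no delay" and zeroed)
 */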
1206
1207 static void be_rx_stats_update(struct be_rx_obj *rxo,
1208                 struct be_rx_compl_info *rxcp)
1209 {
1210         struct be_rx_stats *stats = rx_stats(rxo);
1211
1212         u64_stats_update_begin(&stats->sync);
1213         stats->rx_compl++;
1214         stats->rx_bytes += rxcp->pkt_size;
1215         stats->rx_pkts++;
1216         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1217                 stats->rx_mcast_pkts++;
1218         if (rxcp->err)
1219                 stats->rx_compl_err++;
1220         u64_stats_update_end(&stats->sync);
1221 }
1222
1223 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1224 {
1225         /* L4 checksum is not reliable for non-TCP/UDP packets.
1226          * Also ignore ipcksm for ipv6 pkts */
1227         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1228                                 (rxcp->ip_csum || rxcp->ipv6);
1229 }
1230
1231 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1232                                                 u16 frag_idx)
1233 {
1234         struct be_adapter *adapter = rxo->adapter;
1235         struct be_rx_page_info *rx_page_info;
1236         struct be_queue_info *rxq = &rxo->q;
1237
1238         rx_page_info = &rxo->page_info_tbl[frag_idx];
1239         BUG_ON(!rx_page_info->page);
1240
1241         if (rx_page_info->last_page_user) {
1242                 dma_unmap_page(&adapter->pdev->dev,
1243                                dma_unmap_addr(rx_page_info, bus),
1244                                adapter->big_page_size, DMA_FROM_DEVICE);
1245                 rx_page_info->last_page_user = false;
1246         }
1247
1248         atomic_dec(&rxq->used);
1249         return rx_page_info;
1250 }
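/* (rx buffers are carved as rx_frag_size chunks out of larger DMA-mapped
 * "big" pages; the mapping is torn down only when the chunk flagged as
 * last_page_user is consumed, hence the conditional dma_unmap_page()
 * above)
 */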
1251
1252 /* Throw away the data in the Rx completion */
1253 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1254                                 struct be_rx_compl_info *rxcp)
1255 {
1256         struct be_queue_info *rxq = &rxo->q;
1257         struct be_rx_page_info *page_info;
1258         u16 i, num_rcvd = rxcp->num_rcvd;
1259
1260         for (i = 0; i < num_rcvd; i++) {
1261                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1262                 put_page(page_info->page);
1263                 memset(page_info, 0, sizeof(*page_info));
1264                 index_inc(&rxcp->rxq_idx, rxq->len);
1265         }
1266 }
1267
1268 /*
1269  * skb_fill_rx_data forms a complete skb for an ether frame
1270  * indicated by rxcp.
1271  */
1272 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1273                              struct be_rx_compl_info *rxcp)
1274 {
1275         struct be_queue_info *rxq = &rxo->q;
1276         struct be_rx_page_info *page_info;
1277         u16 i, j;
1278         u16 hdr_len, curr_frag_len, remaining;
1279         u8 *start;
1280
1281         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1282         start = page_address(page_info->page) + page_info->page_offset;
1283         prefetch(start);
1284
1285         /* Copy data in the first descriptor of this completion */
1286         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1287
1288         skb->len = curr_frag_len;
1289         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1290                 memcpy(skb->data, start, curr_frag_len);
1291                 /* Complete packet has now been moved to data */
1292                 put_page(page_info->page);
1293                 skb->data_len = 0;
1294                 skb->tail += curr_frag_len;
1295         } else {
1296                 hdr_len = ETH_HLEN;
1297                 memcpy(skb->data, start, hdr_len);
1298                 skb_shinfo(skb)->nr_frags = 1;
1299                 skb_frag_set_page(skb, 0, page_info->page);
1300                 skb_shinfo(skb)->frags[0].page_offset =
1301                                         page_info->page_offset + hdr_len;
1302                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1303                 skb->data_len = curr_frag_len - hdr_len;
1304                 skb->truesize += rx_frag_size;
1305                 skb->tail += hdr_len;
1306         }
1307         page_info->page = NULL;
1308
1309         if (rxcp->pkt_size <= rx_frag_size) {
1310                 BUG_ON(rxcp->num_rcvd != 1);
1311                 return;
1312         }
1313
1314         /* More frags present for this completion */
1315         index_inc(&rxcp->rxq_idx, rxq->len);
1316         remaining = rxcp->pkt_size - curr_frag_len;
1317         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1318                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1319                 curr_frag_len = min(remaining, rx_frag_size);
1320
1321                 /* Coalesce all frags from the same physical page in one slot */
1322                 if (page_info->page_offset == 0) {
1323                         /* Fresh page */
1324                         j++;
1325                         skb_frag_set_page(skb, j, page_info->page);
1326                         skb_shinfo(skb)->frags[j].page_offset =
1327                                                         page_info->page_offset;
1328                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1329                         skb_shinfo(skb)->nr_frags++;
1330                 } else {
1331                         put_page(page_info->page);
1332                 }
1333
1334                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1335                 skb->len += curr_frag_len;
1336                 skb->data_len += curr_frag_len;
1337                 skb->truesize += rx_frag_size;
1338                 remaining -= curr_frag_len;
1339                 index_inc(&rxcp->rxq_idx, rxq->len);
1340                 page_info->page = NULL;
1341         }
1342         BUG_ON(j > MAX_SKB_FRAGS);
1343 }
1344
1345 /* Process the RX completion indicated by rxcp when GRO is disabled */
1346 static void be_rx_compl_process(struct be_rx_obj *rxo,
1347                                 struct be_rx_compl_info *rxcp)
1348 {
1349         struct be_adapter *adapter = rxo->adapter;
1350         struct net_device *netdev = adapter->netdev;
1351         struct sk_buff *skb;
1352
1353         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1354         if (unlikely(!skb)) {
1355                 rx_stats(rxo)->rx_drops_no_skbs++;
1356                 be_rx_compl_discard(rxo, rxcp);
1357                 return;
1358         }
1359
1360         skb_fill_rx_data(rxo, skb, rxcp);
1361
1362         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1363                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1364         else
1365                 skb_checksum_none_assert(skb);
1366
1367         skb->protocol = eth_type_trans(skb, netdev);
1368         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369         if (netdev->features & NETIF_F_RXHASH)
1370                 skb->rxhash = rxcp->rss_hash;
1371
1373         if (rxcp->vlanf)
1374                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375
1376         netif_receive_skb(skb);
1377 }
1378
1379 /* Process the RX completion indicated by rxcp when GRO is enabled */
1380 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1381                              struct be_rx_compl_info *rxcp)
1382 {
1383         struct be_adapter *adapter = rxo->adapter;
1384         struct be_rx_page_info *page_info;
1385         struct sk_buff *skb = NULL;
1386         struct be_queue_info *rxq = &rxo->q;
1387         u16 remaining, curr_frag_len;
1388         u16 i, j;
1389
1390         skb = napi_get_frags(napi);
1391         if (!skb) {
1392                 be_rx_compl_discard(rxo, rxcp);
1393                 return;
1394         }
1395
1396         remaining = rxcp->pkt_size;
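        /* note: j is declared u16, so the -1 below wraps around; the
         * first "fresh page" j++ brings it to 0 (the wraparound appears
         * intentional here, not a bug)
         */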
1397         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1398                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1399
1400                 curr_frag_len = min(remaining, rx_frag_size);
1401
1402                 /* Coalesce all frags from the same physical page in one slot */
1403                 if (i == 0 || page_info->page_offset == 0) {
1404                         /* First frag or Fresh page */
1405                         j++;
1406                         skb_frag_set_page(skb, j, page_info->page);
1407                         skb_shinfo(skb)->frags[j].page_offset =
1408                                                         page_info->page_offset;
1409                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1410                 } else {
1411                         put_page(page_info->page);
1412                 }
1413                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1414                 skb->truesize += rx_frag_size;
1415                 remaining -= curr_frag_len;
1416                 index_inc(&rxcp->rxq_idx, rxq->len);
1417                 memset(page_info, 0, sizeof(*page_info));
1418         }
1419         BUG_ON(j >= MAX_SKB_FRAGS);
1420
1421         skb_shinfo(skb)->nr_frags = j + 1;
1422         skb->len = rxcp->pkt_size;
1423         skb->data_len = rxcp->pkt_size;
1424         skb->ip_summed = CHECKSUM_UNNECESSARY;
1425         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1426         if (adapter->netdev->features & NETIF_F_RXHASH)
1427                 skb->rxhash = rxcp->rss_hash;
1428
1429         if (rxcp->vlanf)
1430                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1431
1432         napi_gro_frags(napi);
1433 }
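
/* Editor's note: a worked example of the frag-coalescing loop above,
 * under the assumption rx_frag_size = 2048 with 4 KiB pages (assumptions,
 * not guarantees). A page then holds two RX buffers, at page_offsets 0
 * and 2048. When both belong to the same packet, the second iteration
 * sees page_offset != 0, so instead of opening a new frag slot it drops
 * the duplicate page reference with put_page() and grows frags[j] to
 * cover 4096 bytes of a single page in one slot. Also note "j = -1":
 * j is u16, so it wraps and the first j++ lands on slot 0.
 */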
1434
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436                                  struct be_rx_compl_info *rxcp)
1437 {
1438         rxcp->pkt_size =
1439                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444         rxcp->ip_csum =
1445                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446         rxcp->l4_csum =
1447                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448         rxcp->ipv6 =
1449                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450         rxcp->rxq_idx =
1451                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452         rxcp->num_rcvd =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454         rxcp->pkt_type =
1455                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456         rxcp->rss_hash =
1457                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458         if (rxcp->vlanf) {
1459                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460                                           compl);
1461                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462                                                compl);
1463         }
1464         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468                                  struct be_rx_compl_info *rxcp)
1469 {
1470         rxcp->pkt_size =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476         rxcp->ip_csum =
1477                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478         rxcp->l4_csum =
1479                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480         rxcp->ipv6 =
1481                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482         rxcp->rxq_idx =
1483                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484         rxcp->num_rcvd =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486         rxcp->pkt_type =
1487                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488         rxcp->rss_hash =
1489                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490         if (rxcp->vlanf) {
1491                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492                                           compl);
1493                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494                                                compl);
1495         }
1496         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
1499 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1500 {
1501         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1502         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1503         struct be_adapter *adapter = rxo->adapter;
1504
1505         /* For checking the valid bit it is OK to use either definition, as the
1506          * valid bit is at the same position in both v0 and v1 Rx compl */
1507         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1508                 return NULL;
1509
1510         rmb();
1511         be_dws_le_to_cpu(compl, sizeof(*compl));
1512
1513         if (adapter->be3_native)
1514                 be_parse_rx_compl_v1(compl, rxcp);
1515         else
1516                 be_parse_rx_compl_v0(compl, rxcp);
1517
1518         if (rxcp->vlanf) {
1519                 /* vlanf could be wrongly set in some cards.
1520                  * ignore if vtm is not set */
1521                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1522                         rxcp->vlanf = 0;
1523
1524                 if (!lancer_chip(adapter))
1525                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1526
1527                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1528                     !adapter->vlan_tag[rxcp->vlan_tag])
1529                         rxcp->vlanf = 0;
1530         }
1531
1532         /* As the compl has been parsed, reset it; we won't touch it again */
1533         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1534
1535         queue_tail_inc(&rxo->cq);
1536         return rxcp;
1537 }
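
/* Editor's note: the CQ handshake used above, spelled out. HW writes a
 * completion entry and sets its valid bit; the driver tests the bit,
 * issues rmb() so the entry body is not read before the bit, parses the
 * entry, then zeroes the valid dword so that, once the queue wraps, a
 * stale entry cannot be mistaken for a fresh one. A caller simply polls
 * until NULL:
 *
 *	while ((rxcp = be_rx_compl_get(rxo)) != NULL)
 *		... consume rxcp ...	(see be_process_rx below)
 */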
1538
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return alloc_pages(gfp, order);
1546 }
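
/* Editor's note: __GFP_COMP requests a compound page so that
 * get_page()/put_page() on any part of it operate on the compound head.
 * Example, assuming 4 KiB pages: get_order(8192) == 1, so an 8 KiB
 * request becomes one order-1 compound allocation instead of two
 * unrelated pages.
 */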
1547
1548 /*
1549  * Allocate a page, split it to fragments of size rx_frag_size and post as
1550  * receive buffers to BE
1551  */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554         struct be_adapter *adapter = rxo->adapter;
1555         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct page *pagep = NULL;
1558         struct be_eth_rx_d *rxd;
1559         u64 page_dmaaddr = 0, frag_dmaaddr;
1560         u32 posted, page_offset = 0;
1561
1562         page_info = &rxo->page_info_tbl[rxq->head];
1563         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564                 if (!pagep) {
1565                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566                         if (unlikely(!pagep)) {
1567                                 rx_stats(rxo)->rx_post_fail++;
1568                                 break;
1569                         }
1570                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571                                                     0, adapter->big_page_size,
1572                                                     DMA_FROM_DEVICE);
1573                         page_info->page_offset = 0;
1574                 } else {
1575                         get_page(pagep);
1576                         page_info->page_offset = page_offset + rx_frag_size;
1577                 }
1578                 page_offset = page_info->page_offset;
1579                 page_info->page = pagep;
1580                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583                 rxd = queue_head_node(rxq);
1584                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587                 /* Any space left in the current big page for another frag? */
1588                 if ((page_offset + rx_frag_size + rx_frag_size) >
1589                                         adapter->big_page_size) {
1590                         pagep = NULL;
1591                         page_info->last_page_user = true;
1592                 }
1593
1594                 prev_page_info = page_info;
1595                 queue_head_inc(rxq);
1596                 page_info = &rxo->page_info_tbl[rxq->head];
1597         }
1598         if (pagep)
1599                 prev_page_info->last_page_user = true;
1600
1601         if (posted) {
1602                 atomic_add(posted, &rxq->used);
1603                 be_rxq_notify(adapter, rxq->id, posted);
1604         } else if (atomic_read(&rxq->used) == 0) {
1605                 /* Let be_worker replenish when memory is available */
1606                 rxo->rx_post_starved = true;
1607         }
1608 }
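
/* Editor's note: the packing arithmetic above, traced under the
 * assumption rx_frag_size = 2048 and PAGE_SIZE = 4096, which makes
 * big_page_size 4096 (see be_rx_cqs_create below): frags are carved at
 * offsets 0 and 2048 of each page. While carving the offset-2048 frag,
 * the check "page_offset + 2 * rx_frag_size > big_page_size"
 * (2048 + 4096 > 4096) trips, so that frag is flagged last_page_user
 * and pagep is reset to NULL, forcing a fresh page on the next
 * iteration.
 */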
1609
1610 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1611 {
1612         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615                 return NULL;
1616
1617         rmb();
1618         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622         queue_tail_inc(tx_cq);
1623         return txcp;
1624 }
1625
1626 static u16 be_tx_compl_process(struct be_adapter *adapter,
1627                 struct be_tx_obj *txo, u16 last_index)
1628 {
1629         struct be_queue_info *txq = &txo->q;
1630         struct be_eth_wrb *wrb;
1631         struct sk_buff **sent_skbs = txo->sent_skb_list;
1632         struct sk_buff *sent_skb;
1633         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634         bool unmap_skb_hdr = true;
1635
1636         sent_skb = sent_skbs[txq->tail];
1637         BUG_ON(!sent_skb);
1638         sent_skbs[txq->tail] = NULL;
1639
1640         /* skip header wrb */
1641         queue_tail_inc(txq);
1642
1643         do {
1644                 cur_index = txq->tail;
1645                 wrb = queue_tail_node(txq);
1646                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1648                 unmap_skb_hdr = false;
1649
1650                 num_wrbs++;
1651                 queue_tail_inc(txq);
1652         } while (cur_index != last_index);
1653
1654         kfree_skb(sent_skb);
1655         return num_wrbs;
1656 }
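
/* Editor's note: TX reclaim, summarized. A TX completion reports only
 * the wrb_index of the *last* wrb of a packet; the routine above skips
 * the header wrb (hence num_wrbs starting at 1), walks the tail forward
 * unmapping the queued wrbs until it reaches that index, frees the skb,
 * and returns the wrb count so the caller can shrink txq->used by
 * exactly that amount (see be_process_tx below).
 */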
1657
1658 /* Return the number of events in the event queue */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661         struct be_eq_entry *eqe;
1662         int num = 0;
1663
1664         do {
1665                 eqe = queue_tail_node(&eqo->q);
1666                 if (eqe->evt == 0)
1667                         break;
1668
1669                 rmb();
1670                 eqe->evt = 0;
1671                 num++;
1672                 queue_tail_inc(&eqo->q);
1673         } while (true);
1674
1675         return num;
1676 }
1677
1678 /* Leaves the EQ in a disarmed state */
1679 static void be_eq_clean(struct be_eq_obj *eqo)
1680 {
1681         int num = events_get(eqo);
1682
1683         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1684 }
1685
1686 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1687 {
1688         struct be_rx_page_info *page_info;
1689         struct be_queue_info *rxq = &rxo->q;
1690         struct be_queue_info *rx_cq = &rxo->cq;
1691         struct be_rx_compl_info *rxcp;
1692         struct be_adapter *adapter = rxo->adapter;
1693         int flush_wait = 0;
1694         u16 tail;
1695
1696         /* Consume pending rx completions.
1697          * Wait for the flush completion (identified by zero num_rcvd)
1698          * to arrive. Notify CQ even when there are no more CQ entries
1699          * for HW to flush partially coalesced CQ entries.
1700          * In Lancer, there is no need to wait for flush compl.
1701          */
1702         for (;;) {
1703                 rxcp = be_rx_compl_get(rxo);
1704                 if (rxcp == NULL) {
1705                         if (lancer_chip(adapter))
1706                                 break;
1707
1708                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1709                                 dev_warn(&adapter->pdev->dev,
1710                                          "did not receive flush compl\n");
1711                                 break;
1712                         }
1713                         be_cq_notify(adapter, rx_cq->id, true, 0);
1714                         mdelay(1);
1715                 } else {
1716                         be_rx_compl_discard(rxo, rxcp);
1717                         be_cq_notify(adapter, rx_cq->id, true, 1);
1718                         if (rxcp->num_rcvd == 0)
1719                                 break;
1720                 }
1721         }
1722
1723         /* After cleanup, leave the CQ in unarmed state */
1724         be_cq_notify(adapter, rx_cq->id, false, 0);
1725
1726         /* Then free posted rx buffers that were not used */
1727         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1728         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1729                 page_info = get_rx_page_info(rxo, tail);
1730                 put_page(page_info->page);
1731                 memset(page_info, 0, sizeof(*page_info));
1732         }
1733         BUG_ON(atomic_read(&rxq->used));
1734         rxq->tail = rxq->head = 0;
1735 }
1736
1737 static void be_tx_compl_clean(struct be_adapter *adapter)
1738 {
1739         struct be_tx_obj *txo;
1740         struct be_queue_info *txq;
1741         struct be_eth_tx_compl *txcp;
1742         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1743         struct sk_buff *sent_skb;
1744         bool dummy_wrb;
1745         int i, pending_txqs;
1746
1747         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1748         do {
1749                 pending_txqs = adapter->num_tx_qs;
1750
1751                 for_all_tx_queues(adapter, txo, i) {
1752                         txq = &txo->q;
1753                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1754                                 end_idx =
1755                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1756                                                       wrb_index, txcp);
1757                                 num_wrbs += be_tx_compl_process(adapter, txo,
1758                                                                 end_idx);
1759                                 cmpl++;
1760                         }
1761                         if (cmpl) {
1762                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1763                                 atomic_sub(num_wrbs, &txq->used);
1764                                 cmpl = 0;
1765                                 num_wrbs = 0;
1766                         }
1767                         if (atomic_read(&txq->used) == 0)
1768                                 pending_txqs--;
1769                 }
1770
1771                 if (pending_txqs == 0 || ++timeo > 200)
1772                         break;
1773
1774                 mdelay(1);
1775         } while (true);
1776
1777         for_all_tx_queues(adapter, txo, i) {
1778                 txq = &txo->q;
1779                 if (atomic_read(&txq->used))
1780                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1781                                 atomic_read(&txq->used));
1782
1783                 /* free posted tx for which compls will never arrive */
1784                 while (atomic_read(&txq->used)) {
1785                         sent_skb = txo->sent_skb_list[txq->tail];
1786                         end_idx = txq->tail;
1787                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1788                                                    &dummy_wrb);
1789                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1790                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1791                         atomic_sub(num_wrbs, &txq->used);
1792                 }
1793         }
1794 }
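
/* Editor's note: the "200" bound above implements the 200 ms limit from
 * the comment: up to 200 passes with mdelay(1) between them. If
 * completions still have not drained by then, the second loop
 * force-frees whatever remains posted, using wrb_cnt_for_skb() to
 * recompute how many wrbs each stuck skb occupies.
 */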
1795
1796 static void be_evt_queues_destroy(struct be_adapter *adapter)
1797 {
1798         struct be_eq_obj *eqo;
1799         int i;
1800
1801         for_all_evt_queues(adapter, eqo, i) {
1802                 if (eqo->q.created) {
1803                         be_eq_clean(eqo);
1804                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1805                 }
1806                 be_queue_free(adapter, &eqo->q);
1807         }
1808 }
1809
1810 static int be_evt_queues_create(struct be_adapter *adapter)
1811 {
1812         struct be_queue_info *eq;
1813         struct be_eq_obj *eqo;
1814         int i, rc;
1815
1816         adapter->num_evt_qs = num_irqs(adapter);
1817
1818         for_all_evt_queues(adapter, eqo, i) {
1819                 eqo->adapter = adapter;
1820                 eqo->tx_budget = BE_TX_BUDGET;
1821                 eqo->idx = i;
1822                 eqo->max_eqd = BE_MAX_EQD;
1823                 eqo->enable_aic = true;
1824
1825                 eq = &eqo->q;
1826                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1827                                         sizeof(struct be_eq_entry));
1828                 if (rc)
1829                         return rc;
1830
1831                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1832                 if (rc)
1833                         return rc;
1834         }
1835         return 0;
1836 }
1837
1838 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1839 {
1840         struct be_queue_info *q;
1841
1842         q = &adapter->mcc_obj.q;
1843         if (q->created)
1844                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1845         be_queue_free(adapter, q);
1846
1847         q = &adapter->mcc_obj.cq;
1848         if (q->created)
1849                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1850         be_queue_free(adapter, q);
1851 }
1852
1853 /* Must be called only after TX qs are created as MCC shares TX EQ */
1854 static int be_mcc_queues_create(struct be_adapter *adapter)
1855 {
1856         struct be_queue_info *q, *cq;
1857
1858         cq = &adapter->mcc_obj.cq;
1859         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1860                         sizeof(struct be_mcc_compl)))
1861                 goto err;
1862
1863         /* Use the default EQ for MCC completions */
1864         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1865                 goto mcc_cq_free;
1866
1867         q = &adapter->mcc_obj.q;
1868         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1869                 goto mcc_cq_destroy;
1870
1871         if (be_cmd_mccq_create(adapter, q, cq))
1872                 goto mcc_q_free;
1873
1874         return 0;
1875
1876 mcc_q_free:
1877         be_queue_free(adapter, q);
1878 mcc_cq_destroy:
1879         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1880 mcc_cq_free:
1881         be_queue_free(adapter, cq);
1882 err:
1883         return -1;
1884 }
1885
1886 static void be_tx_queues_destroy(struct be_adapter *adapter)
1887 {
1888         struct be_queue_info *q;
1889         struct be_tx_obj *txo;
1890         u8 i;
1891
1892         for_all_tx_queues(adapter, txo, i) {
1893                 q = &txo->q;
1894                 if (q->created)
1895                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1896                 be_queue_free(adapter, q);
1897
1898                 q = &txo->cq;
1899                 if (q->created)
1900                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1901                 be_queue_free(adapter, q);
1902         }
1903 }
1904
1905 static int be_num_txqs_want(struct be_adapter *adapter)
1906 {
1907         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1908             be_is_mc(adapter) ||
1909             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1910             BE2_chip(adapter))
1911                 return 1;
1912         else
1913                 return adapter->max_tx_queues;
1914 }
1915
1916 static int be_tx_cqs_create(struct be_adapter *adapter)
1917 {
1918         struct be_queue_info *cq, *eq;
1919         int status;
1920         struct be_tx_obj *txo;
1921         u8 i;
1922
1923         adapter->num_tx_qs = be_num_txqs_want(adapter);
1924         if (adapter->num_tx_qs != MAX_TX_QS) {
1925                 rtnl_lock();
1926                 netif_set_real_num_tx_queues(adapter->netdev,
1927                         adapter->num_tx_qs);
1928                 rtnl_unlock();
1929         }
1930
1931         for_all_tx_queues(adapter, txo, i) {
1932                 cq = &txo->cq;
1933                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1934                                         sizeof(struct be_eth_tx_compl));
1935                 if (status)
1936                         return status;
1937
1938                 /* If num_evt_qs is less than num_tx_qs, then more than
1939          * one txq shares an eq
1940                  */
1941                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1942                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1943                 if (status)
1944                         return status;
1945         }
1946         return 0;
1947 }
1948
1949 static int be_tx_qs_create(struct be_adapter *adapter)
1950 {
1951         struct be_tx_obj *txo;
1952         int i, status;
1953
1954         for_all_tx_queues(adapter, txo, i) {
1955                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1956                                         sizeof(struct be_eth_wrb));
1957                 if (status)
1958                         return status;
1959
1960                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1961                 if (status)
1962                         return status;
1963         }
1964
1965         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1966                  adapter->num_tx_qs);
1967         return 0;
1968 }
1969
1970 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q;
1973         struct be_rx_obj *rxo;
1974         int i;
1975
1976         for_all_rx_queues(adapter, rxo, i) {
1977                 q = &rxo->cq;
1978                 if (q->created)
1979                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980                 be_queue_free(adapter, q);
1981         }
1982 }
1983
1984 static int be_rx_cqs_create(struct be_adapter *adapter)
1985 {
1986         struct be_queue_info *eq, *cq;
1987         struct be_rx_obj *rxo;
1988         int rc, i;
1989
1990         /* We'll create as many RSS rings as there are irqs.
1991          * But when there's only one irq there's no use creating RSS rings
1992          */
1993         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1994                                 num_irqs(adapter) + 1 : 1;
1995         if (adapter->num_rx_qs != MAX_RX_QS) {
1996                 rtnl_lock();
1997                 netif_set_real_num_rx_queues(adapter->netdev,
1998                                              adapter->num_rx_qs);
1999                 rtnl_unlock();
2000         }
2001
2002         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2003         for_all_rx_queues(adapter, rxo, i) {
2004                 rxo->adapter = adapter;
2005                 cq = &rxo->cq;
2006                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2007                                 sizeof(struct be_eth_rx_compl));
2008                 if (rc)
2009                         return rc;
2010
2011                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2012                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2013                 if (rc)
2014                         return rc;
2015         }
2016
2017         dev_info(&adapter->pdev->dev,
2018                  "created %d RSS queue(s) and 1 default RX queue\n",
2019                  adapter->num_rx_qs - 1);
2020         return 0;
2021 }
2022
2023 static irqreturn_t be_intx(int irq, void *dev)
2024 {
2025         struct be_eq_obj *eqo = dev;
2026         struct be_adapter *adapter = eqo->adapter;
2027         int num_evts = 0;
2028
2029         /* IRQ is not expected when NAPI is scheduled as the EQ
2030          * will not be armed.
2031          * But, this can happen on Lancer INTx where it takes
2032          * a while to de-assert INTx or in BE2 where occasionally
2033          * an interrupt may be raised even when EQ is unarmed.
2034          * If NAPI is already scheduled, then counting & notifying
2035          * events will orphan them.
2036          */
2037         if (napi_schedule_prep(&eqo->napi)) {
2038                 num_evts = events_get(eqo);
2039                 __napi_schedule(&eqo->napi);
2040                 if (num_evts)
2041                         eqo->spurious_intr = 0;
2042         }
2043         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2044
2045         /* Return IRQ_HANDLED only for the first spurious intr
2046          * after a valid intr to stop the kernel from branding
2047          * this irq as a bad one!
2048          */
2049         if (num_evts || eqo->spurious_intr++ == 0)
2050                 return IRQ_HANDLED;
2051         else
2052                 return IRQ_NONE;
2053 }
2054
2055 static irqreturn_t be_msix(int irq, void *dev)
2056 {
2057         struct be_eq_obj *eqo = dev;
2058
2059         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2060         napi_schedule(&eqo->napi);
2061         return IRQ_HANDLED;
2062 }
2063
2064 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2065 {
2066         return rxcp->tcpf && !rxcp->err;
2067 }
2068
2069 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2070                         int budget)
2071 {
2072         struct be_adapter *adapter = rxo->adapter;
2073         struct be_queue_info *rx_cq = &rxo->cq;
2074         struct be_rx_compl_info *rxcp;
2075         u32 work_done;
2076
2077         for (work_done = 0; work_done < budget; work_done++) {
2078                 rxcp = be_rx_compl_get(rxo);
2079                 if (!rxcp)
2080                         break;
2081
2082                 /* Is it a flush compl that has no data? */
2083                 if (unlikely(rxcp->num_rcvd == 0))
2084                         goto loop_continue;
2085
2086                 /* Discard compl with partial DMA Lancer B0 */
2087                 if (unlikely(!rxcp->pkt_size)) {
2088                         be_rx_compl_discard(rxo, rxcp);
2089                         goto loop_continue;
2090                 }
2091
2092                 /* On BE drop pkts that arrive due to imperfect filtering in
2093                  * promiscuous mode on some SKUs
2094                  */
2095                 if (unlikely(rxcp->port != adapter->port_num &&
2096                                 !lancer_chip(adapter))) {
2097                         be_rx_compl_discard(rxo, rxcp);
2098                         goto loop_continue;
2099                 }
2100
2101                 if (do_gro(rxcp))
2102                         be_rx_compl_process_gro(rxo, napi, rxcp);
2103                 else
2104                         be_rx_compl_process(rxo, rxcp);
2105 loop_continue:
2106                 be_rx_stats_update(rxo, rxcp);
2107         }
2108
2109         if (work_done) {
2110                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2111
2112                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2113                         be_post_rx_frags(rxo, GFP_ATOMIC);
2114         }
2115
2116         return work_done;
2117 }
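
/* Editor's note: two details of the poll loop above. Completions are
 * acked in one batch (be_cq_notify() with the accumulated work_done)
 * rather than per entry, and the RX ring is refilled only after it
 * drains below RX_FRAGS_REFILL_WM; the refill uses GFP_ATOMIC because
 * this path runs in NAPI (softirq) context, where sleeping allocations
 * are not allowed.
 */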
2118
2119 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2120                           int budget, int idx)
2121 {
2122         struct be_eth_tx_compl *txcp;
2123         int num_wrbs = 0, work_done;
2124
2125         for (work_done = 0; work_done < budget; work_done++) {
2126                 txcp = be_tx_compl_get(&txo->cq);
2127                 if (!txcp)
2128                         break;
2129                 num_wrbs += be_tx_compl_process(adapter, txo,
2130                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2131                                         wrb_index, txcp));
2132         }
2133
2134         if (work_done) {
2135                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2136                 atomic_sub(num_wrbs, &txo->q.used);
2137
2138                 /* As Tx wrbs have been freed up, wake up netdev queue
2139                  * if it was stopped due to lack of tx wrbs.  */
2140                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2141                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2142                         netif_wake_subqueue(adapter->netdev, idx);
2143                 }
2144
2145                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2146                 tx_stats(txo)->tx_compl += work_done;
2147                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2148         }
2149         return (work_done < budget); /* Done */
2150 }
2151
2152 int be_poll(struct napi_struct *napi, int budget)
2153 {
2154         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2155         struct be_adapter *adapter = eqo->adapter;
2156         int max_work = 0, work, i, num_evts;
2157         bool tx_done;
2158
2159         num_evts = events_get(eqo);
2160
2161         /* Process all TXQs serviced by this EQ */
2162         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2163                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2164                                         eqo->tx_budget, i);
2165                 if (!tx_done)
2166                         max_work = budget;
2167         }
2168
2169         /* This loop will iterate twice for EQ0 in which
2170          * completions of the last RXQ (default one) are also processed.
2171          * For other EQs the loop iterates only once
2172          */
2173         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2174                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2175                 max_work = max(work, max_work);
2176         }
2177
2178         if (is_mcc_eqo(eqo))
2179                 be_process_mcc(adapter);
2180
2181         if (max_work < budget) {
2182                 napi_complete(napi);
2183                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2184         } else {
2185                 /* As we'll continue in polling mode, count and clear events */
2186                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2187         }
2188         return max_work;
2189 }
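
/* Editor's note: be_poll() follows the standard NAPI contract. If the
 * budget was not exhausted, napi_complete() is called and the EQ is
 * re-armed so the next event raises an interrupt; if it was (or a TX
 * queue still had work, which forces max_work = budget above), the EQ
 * is left unarmed and the kernel will call be_poll() again. Either way
 * the events counted by events_get() are acked via be_eq_notify().
 */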
2190
2191 void be_detect_error(struct be_adapter *adapter)
2192 {
2193         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2194         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2195         u32 i;
2196
2197         if (be_hw_error(adapter))
2198                 return;
2199
2200         if (lancer_chip(adapter)) {
2201                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2202                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2203                         sliport_err1 = ioread32(adapter->db +
2204                                         SLIPORT_ERROR1_OFFSET);
2205                         sliport_err2 = ioread32(adapter->db +
2206                                         SLIPORT_ERROR2_OFFSET);
2207                 }
2208         } else {
2209                 pci_read_config_dword(adapter->pdev,
2210                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2211                 pci_read_config_dword(adapter->pdev,
2212                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2213                 pci_read_config_dword(adapter->pdev,
2214                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2215                 pci_read_config_dword(adapter->pdev,
2216                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2217
2218                 ue_lo = (ue_lo & ~ue_lo_mask);
2219                 ue_hi = (ue_hi & ~ue_hi_mask);
2220         }
2221
2222         /* On certain platforms BE hardware can indicate spurious UEs.
2223          * hw_error is therefore not set here on UE detection; if the UE
2224          * is real, the h/w will stop working completely on its own.
2225          */
2226         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2227                 adapter->hw_error = true;
2228                 dev_err(&adapter->pdev->dev,
2229                         "Error detected in the card\n");
2230         }
2231
2232         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2233                 dev_err(&adapter->pdev->dev,
2234                         "ERR: sliport status 0x%x\n", sliport_status);
2235                 dev_err(&adapter->pdev->dev,
2236                         "ERR: sliport error1 0x%x\n", sliport_err1);
2237                 dev_err(&adapter->pdev->dev,
2238                         "ERR: sliport error2 0x%x\n", sliport_err2);
2239         }
2240
2241         if (ue_lo) {
2242                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2243                         if (ue_lo & 1)
2244                                 dev_err(&adapter->pdev->dev,
2245                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2246                 }
2247         }
2248
2249         if (ue_hi) {
2250                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2251                         if (ue_hi & 1)
2252                                 dev_err(&adapter->pdev->dev,
2253                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2254                 }
2255         }
2256
2257 }
2258
2259 static void be_msix_disable(struct be_adapter *adapter)
2260 {
2261         if (msix_enabled(adapter)) {
2262                 pci_disable_msix(adapter->pdev);
2263                 adapter->num_msix_vec = 0;
2264         }
2265 }
2266
2267 static uint be_num_rss_want(struct be_adapter *adapter)
2268 {
2269         u32 num = 0;
2270
2271         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2272             (lancer_chip(adapter) ||
2273              (!sriov_want(adapter) && be_physfn(adapter)))) {
2274                 num = adapter->max_rss_queues;
2275                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2276         }
2277         return num;
2278 }
2279
2280 static void be_msix_enable(struct be_adapter *adapter)
2281 {
2282 #define BE_MIN_MSIX_VECTORS             1
2283         int i, status, num_vec, num_roce_vec = 0;
2284         struct device *dev = &adapter->pdev->dev;
2285
2286         /* If RSS queues are not used, need a vec for default RX Q */
2287         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2288         if (be_roce_supported(adapter)) {
2289                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2290                                         (num_online_cpus() + 1));
2291                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2292                 num_vec += num_roce_vec;
2293                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2294         }
2295         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2296
2297         for (i = 0; i < num_vec; i++)
2298                 adapter->msix_entries[i].entry = i;
2299
2300         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2301         if (status == 0) {
2302                 goto done;
2303         } else if (status >= BE_MIN_MSIX_VECTORS) {
2304                 num_vec = status;
2305                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2306                                 num_vec) == 0)
2307                         goto done;
2308         }
2309
2310         dev_warn(dev, "MSIx enable failed\n");
2311         return;
2312 done:
2313         if (be_roce_supported(adapter)) {
2314                 if (num_vec > num_roce_vec) {
2315                         adapter->num_msix_vec = num_vec - num_roce_vec;
2316                         adapter->num_msix_roce_vec =
2317                                 num_vec - adapter->num_msix_vec;
2318                 } else {
2319                         adapter->num_msix_vec = num_vec;
2320                         adapter->num_msix_roce_vec = 0;
2321                 }
2322         } else
2323                 adapter->num_msix_vec = num_vec;
2324         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2325         return;
2326 }
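
/* Editor's note: this is the classic retry idiom for the legacy
 * pci_enable_msix() API: 0 means all num_vec vectors were granted, a
 * positive return is the number of vectors actually available, and the
 * call is retried with that smaller count when it still meets
 * BE_MIN_MSIX_VECTORS. On outright failure the function just warns and
 * returns, and be_irq_register() later falls back to INTx.
 */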
2327
2328 static inline int be_msix_vec_get(struct be_adapter *adapter,
2329                                 struct be_eq_obj *eqo)
2330 {
2331         return adapter->msix_entries[eqo->idx].vector;
2332 }
2333
2334 static int be_msix_register(struct be_adapter *adapter)
2335 {
2336         struct net_device *netdev = adapter->netdev;
2337         struct be_eq_obj *eqo;
2338         int status, i, vec;
2339
2340         for_all_evt_queues(adapter, eqo, i) {
2341                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2342                 vec = be_msix_vec_get(adapter, eqo);
2343                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2344                 if (status)
2345                         goto err_msix;
2346         }
2347
2348         return 0;
2349 err_msix:
2350         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2351                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2352         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2353                 status);
2354         be_msix_disable(adapter);
2355         return status;
2356 }
2357
2358 static int be_irq_register(struct be_adapter *adapter)
2359 {
2360         struct net_device *netdev = adapter->netdev;
2361         int status;
2362
2363         if (msix_enabled(adapter)) {
2364                 status = be_msix_register(adapter);
2365                 if (status == 0)
2366                         goto done;
2367                 /* INTx is not supported for VF */
2368                 if (!be_physfn(adapter))
2369                         return status;
2370         }
2371
2372         /* INTx: only the first EQ is used */
2373         netdev->irq = adapter->pdev->irq;
2374         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2375                              &adapter->eq_obj[0]);
2376         if (status) {
2377                 dev_err(&adapter->pdev->dev,
2378                         "INTx request IRQ failed - err %d\n", status);
2379                 return status;
2380         }
2381 done:
2382         adapter->isr_registered = true;
2383         return 0;
2384 }
2385
2386 static void be_irq_unregister(struct be_adapter *adapter)
2387 {
2388         struct net_device *netdev = adapter->netdev;
2389         struct be_eq_obj *eqo;
2390         int i;
2391
2392         if (!adapter->isr_registered)
2393                 return;
2394
2395         /* INTx */
2396         if (!msix_enabled(adapter)) {
2397                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2398                 goto done;
2399         }
2400
2401         /* MSIx */
2402         for_all_evt_queues(adapter, eqo, i)
2403                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2404
2405 done:
2406         adapter->isr_registered = false;
2407 }
2408
2409 static void be_rx_qs_destroy(struct be_adapter *adapter)
2410 {
2411         struct be_queue_info *q;
2412         struct be_rx_obj *rxo;
2413         int i;
2414
2415         for_all_rx_queues(adapter, rxo, i) {
2416                 q = &rxo->q;
2417                 if (q->created) {
2418                         be_cmd_rxq_destroy(adapter, q);
2419                         /* After the rxq is invalidated, wait for a grace time
2420                          * of 1ms for all dma to end and the flush compl to
2421                          * arrive
2422                          */
2423                         mdelay(1);
2424                         be_rx_cq_clean(rxo);
2425                 }
2426                 be_queue_free(adapter, q);
2427         }
2428 }
2429
2430 static int be_close(struct net_device *netdev)
2431 {
2432         struct be_adapter *adapter = netdev_priv(netdev);
2433         struct be_eq_obj *eqo;
2434         int i;
2435
2436         be_roce_dev_close(adapter);
2437
2438         if (!lancer_chip(adapter))
2439                 be_intr_set(adapter, false);
2440
2441         for_all_evt_queues(adapter, eqo, i)
2442                 napi_disable(&eqo->napi);
2443
2444         be_async_mcc_disable(adapter);
2445
2446         /* Wait for all pending tx completions to arrive so that
2447          * all tx skbs are freed.
2448          */
2449         be_tx_compl_clean(adapter);
2450
2451         be_rx_qs_destroy(adapter);
2452
2453         for_all_evt_queues(adapter, eqo, i) {
2454                 if (msix_enabled(adapter))
2455                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2456                 else
2457                         synchronize_irq(netdev->irq);
2458                 be_eq_clean(eqo);
2459         }
2460
2461         be_irq_unregister(adapter);
2462
2463         return 0;
2464 }
2465
2466 static int be_rx_qs_create(struct be_adapter *adapter)
2467 {
2468         struct be_rx_obj *rxo;
2469         int rc, i, j;
2470         u8 rsstable[128];
2471
2472         for_all_rx_queues(adapter, rxo, i) {
2473                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2474                                     sizeof(struct be_eth_rx_d));
2475                 if (rc)
2476                         return rc;
2477         }
2478
2479         /* The FW would like the default RXQ to be created first */
2480         rxo = default_rxo(adapter);
2481         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2482                                adapter->if_handle, false, &rxo->rss_id);
2483         if (rc)
2484                 return rc;
2485
2486         for_all_rss_queues(adapter, rxo, i) {
2487                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2488                                        rx_frag_size, adapter->if_handle,
2489                                        true, &rxo->rss_id);
2490                 if (rc)
2491                         return rc;
2492         }
2493
2494         if (be_multi_rxq(adapter)) {
2495                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2496                         for_all_rss_queues(adapter, rxo, i) {
2497                                 if ((j + i) >= 128)
2498                                         break;
2499                                 rsstable[j + i] = rxo->rss_id;
2500                         }
2501                 }
2502                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2503                 if (rc)
2504                         return rc;
2505         }
2506
2507         /* First time posting */
2508         for_all_rx_queues(adapter, rxo, i)
2509                 be_post_rx_frags(rxo, GFP_KERNEL);
2510         return 0;
2511 }
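
/* Editor's note: a worked example of the rsstable fill above, assuming
 * num_rx_qs = 5 (four RSS rings plus the default ring). The outer loop
 * advances j by num_rx_qs - 1 = 4 while the inner loop writes the four
 * rss_ids in order, so the 128-entry indirection table becomes the
 * repeating pattern id0, id1, id2, id3, id0, ... and flows hash evenly
 * across the RSS rings; the default ring is deliberately absent from
 * the table.
 */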
2512
2513 static int be_open(struct net_device *netdev)
2514 {
2515         struct be_adapter *adapter = netdev_priv(netdev);
2516         struct be_eq_obj *eqo;
2517         struct be_rx_obj *rxo;
2518         struct be_tx_obj *txo;
2519         u8 link_status;
2520         int status, i;
2521
2522         status = be_rx_qs_create(adapter);
2523         if (status)
2524                 goto err;
2525
2526         be_irq_register(adapter);
2527
2528         if (!lancer_chip(adapter))
2529                 be_intr_set(adapter, true);
2530
2531         for_all_rx_queues(adapter, rxo, i)
2532                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2533
2534         for_all_tx_queues(adapter, txo, i)
2535                 be_cq_notify(adapter, txo->cq.id, true, 0);
2536
2537         be_async_mcc_enable(adapter);
2538
2539         for_all_evt_queues(adapter, eqo, i) {
2540                 napi_enable(&eqo->napi);
2541                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2542         }
2543
2544         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2545         if (!status)
2546                 be_link_status_update(adapter, link_status);
2547
2548         be_roce_dev_open(adapter);
2549         return 0;
2550 err:
2551         be_close(adapter->netdev);
2552         return -EIO;
2553 }
2554
2555 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2556 {
2557         struct be_dma_mem cmd;
2558         int status = 0;
2559         u8 mac[ETH_ALEN];
2560
2561         memset(mac, 0, ETH_ALEN);
2562
2563         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2564         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2565                                     GFP_KERNEL);
2566         if (cmd.va == NULL)
2567                 return -ENOMEM;
2568         memset(cmd.va, 0, cmd.size);
2569
2570         if (enable) {
2571                 status = pci_write_config_dword(adapter->pdev,
2572                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2573                 if (status) {
2574                         dev_err(&adapter->pdev->dev,
2575                                 "Could not enable Wake-on-lan\n");
2576                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2577                                           cmd.dma);
2578                         return status;
2579                 }
2580                 status = be_cmd_enable_magic_wol(adapter,
2581                                 adapter->netdev->dev_addr, &cmd);
2582                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2583                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2584         } else {
2585                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2586                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2587                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2588         }
2589
2590         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2591         return status;
2592 }
2593
2594 /*
2595  * Generate a seed MAC address from the PF MAC address using jhash.
2596  * MAC addresses for VFs are assigned incrementally starting from the seed.
2597  * These addresses are programmed in the ASIC by the PF and the VF driver
2598  * queries for the MAC address during its probe.
2599  */
2600 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2601 {
2602         u32 vf;
2603         int status = 0;
2604         u8 mac[ETH_ALEN];
2605         struct be_vf_cfg *vf_cfg;
2606
2607         be_vf_eth_addr_generate(adapter, mac);
2608
2609         for_all_vfs(adapter, vf_cfg, vf) {
2610                 if (lancer_chip(adapter)) {
2611                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2612                 } else {
2613                         status = be_cmd_pmac_add(adapter, mac,
2614                                                  vf_cfg->if_handle,
2615                                                  &vf_cfg->pmac_id, vf + 1);
2616                 }
2617
2618                 if (status)
2619                         dev_err(&adapter->pdev->dev,
2620                         "MAC address assignment failed for VF %d\n", vf);
2621                 else
2622                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2623
2624                 mac[5] += 1;
2625         }
2626         return status;
2627 }
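
/* Editor's note: a concrete run of the loop above with a hypothetical
 * seed of 02:00:00:a0:b0:00: VF0 is programmed with ...:b0:00, VF1 with
 * ...:b0:01, and so on via "mac[5] += 1". Only the last octet is
 * bumped, so the scheme quietly assumes it never has to cross a
 * 256-address boundary.
 */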
2628
2629 static int be_vfs_mac_query(struct be_adapter *adapter)
2630 {
2631         int status, vf;
2632         u8 mac[ETH_ALEN];
2633         struct be_vf_cfg *vf_cfg;
2634         bool active;
2635
2636         for_all_vfs(adapter, vf_cfg, vf) {
2637                 be_cmd_get_mac_from_list(adapter, mac, &active,
2638                                          &vf_cfg->pmac_id, 0);
2639
2640                 status = be_cmd_mac_addr_query(adapter, mac, false,
2641                                                vf_cfg->if_handle, 0);
2642                 if (status)
2643                         return status;
2644                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2645         }
2646         return 0;
2647 }
2648
2649 static void be_vf_clear(struct be_adapter *adapter)
2650 {
2651         struct be_vf_cfg *vf_cfg;
2652         u32 vf;
2653
2654         if (be_find_vfs(adapter, ASSIGNED)) {
2655                 dev_warn(&adapter->pdev->dev,
2656                          "VFs are assigned to VMs: not disabling VFs\n");
2657                 goto done;
2658         }
2659
2660         for_all_vfs(adapter, vf_cfg, vf) {
2661                 if (lancer_chip(adapter))
2662                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2663                 else
2664                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2665                                         vf_cfg->pmac_id, vf + 1);
2666
2667                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2668         }
2669         pci_disable_sriov(adapter->pdev);
2670 done:
2671         kfree(adapter->vf_cfg);
2672         adapter->num_vfs = 0;
2673 }
2674
2675 static int be_clear(struct be_adapter *adapter)
2676 {
2677         int i = 1;
2678
2679         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2680                 cancel_delayed_work_sync(&adapter->work);
2681                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2682         }
2683
2684         if (sriov_enabled(adapter))
2685                 be_vf_clear(adapter);
2686
2687         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2688                 be_cmd_pmac_del(adapter, adapter->if_handle,
2689                         adapter->pmac_id[i], 0);
2690
2691         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2692
2693         be_mcc_queues_destroy(adapter);
2694         be_rx_cqs_destroy(adapter);
2695         be_tx_queues_destroy(adapter);
2696         be_evt_queues_destroy(adapter);
2697
2698         kfree(adapter->pmac_id);
2699         adapter->pmac_id = NULL;
2700
2701         be_msix_disable(adapter);
2702         return 0;
2703 }
2704
2705 static int be_vfs_if_create(struct be_adapter *adapter)
2706 {
2707         struct be_vf_cfg *vf_cfg;
2708         u32 cap_flags, en_flags, vf;
2709         int status = 0;
2710
2711         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2712                     BE_IF_FLAGS_MULTICAST;
2713
2714         for_all_vfs(adapter, vf_cfg, vf) {
2715                 if (!BE3_chip(adapter))
2716                         be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2717
2718                 /* If a FW profile exists, then cap_flags are updated */
2719                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2720                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2721                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2722                                           &vf_cfg->if_handle, vf + 1);
2723                 if (status)
2724                         goto err;
2725         }
2726 err:
2727         return status;
2728 }
2729
2730 static int be_vf_setup_init(struct be_adapter *adapter)
2731 {
2732         struct be_vf_cfg *vf_cfg;
2733         int vf;
2734
2735         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2736                                   GFP_KERNEL);
2737         if (!adapter->vf_cfg)
2738                 return -ENOMEM;
2739
2740         for_all_vfs(adapter, vf_cfg, vf) {
2741                 vf_cfg->if_handle = -1;
2742                 vf_cfg->pmac_id = -1;
2743         }
2744         return 0;
2745 }
2746
2747 static int be_vf_setup(struct be_adapter *adapter)
2748 {
2749         struct be_vf_cfg *vf_cfg;
2750         u16 def_vlan, lnk_speed;
2751         int status, old_vfs, vf;
2752         struct device *dev = &adapter->pdev->dev;
2753
2754         old_vfs = be_find_vfs(adapter, ENABLED);
2755         if (old_vfs) {
2756                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2757                 if (old_vfs != num_vfs)
2758                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2759                 adapter->num_vfs = old_vfs;
2760         } else {
2761                 if (num_vfs > adapter->dev_num_vfs)
2762                         dev_info(dev, "Device supports %d VFs and not %d\n",
2763                                  adapter->dev_num_vfs, num_vfs);
2764                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2765
2766                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2767                 if (status) {
2768                         dev_err(dev, "SRIOV enable failed\n");
2769                         adapter->num_vfs = 0;
2770                         return 0;
2771                 }
2772         }
2773
2774         status = be_vf_setup_init(adapter);
2775         if (status)
2776                 goto err;
2777
2778         if (old_vfs) {
2779                 for_all_vfs(adapter, vf_cfg, vf) {
2780                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2781                         if (status)
2782                                 goto err;
2783                 }
2784         } else {
2785                 status = be_vfs_if_create(adapter);
2786                 if (status)
2787                         goto err;
2788         }
2789
2790         if (old_vfs) {
2791                 status = be_vfs_mac_query(adapter);
2792                 if (status)
2793                         goto err;
2794         } else {
2795                 status = be_vf_eth_addr_config(adapter);
2796                 if (status)
2797                         goto err;
2798         }
2799
2800         for_all_vfs(adapter, vf_cfg, vf) {
2801                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2802                  * Allow full available bandwidth
2803                  */
2804                 if (BE3_chip(adapter) && !old_vfs)
2805                         be_cmd_set_qos(adapter, 1000, vf+1);
2806
2807                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2808                                                   NULL, vf + 1);
2809                 if (!status)
2810                         vf_cfg->tx_rate = lnk_speed;
2811
2812                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2813                                                vf + 1, vf_cfg->if_handle);
2814                 if (status)
2815                         goto err;
2816                 vf_cfg->def_vid = def_vlan;
2817
2818                 be_cmd_enable_vf(adapter, vf + 1);
2819         }
2820         return 0;
2821 err:
2822         dev_err(dev, "VF setup failed\n");
2823         be_vf_clear(adapter);
2824         return status;
2825 }
2826
2827 static void be_setup_init(struct be_adapter *adapter)
2828 {
2829         adapter->vlan_prio_bmap = 0xff;
2830         adapter->phy.link_speed = -1;
2831         adapter->if_handle = -1;
2832         adapter->be3_native = false;
2833         adapter->promiscuous = false;
2834         if (be_physfn(adapter))
2835                 adapter->cmd_privileges = MAX_PRIVILEGES;
2836         else
2837                 adapter->cmd_privileges = MIN_PRIVILEGES;
2838 }
2839
2840 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2841                            bool *active_mac, u32 *pmac_id)
2842 {
2843         int status = 0;
2844
2845         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2846                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2847                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2848                         *active_mac = true;
2849                 else
2850                         *active_mac = false;
2851
2852                 return status;
2853         }
2854
2855         if (lancer_chip(adapter)) {
2856                 status = be_cmd_get_mac_from_list(adapter, mac,
2857                                                   active_mac, pmac_id, 0);
2858                 if (*active_mac) {
2859                         status = be_cmd_mac_addr_query(adapter, mac, false,
2860                                                        if_handle, *pmac_id);
2861                 }
2862         } else if (be_physfn(adapter)) {
2863                 /* For BE3, for PF get permanent MAC */
2864                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2865                 *active_mac = false;
2866         } else {
2867                 /* For BE3, for VF get soft MAC assigned by PF */
2868                 status = be_cmd_mac_addr_query(adapter, mac, false,
2869                                                if_handle, 0);
2870                 *active_mac = true;
2871         }
2872         return status;
2873 }
2874
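/* Discover per-function resource limits: use the FW function profile when
 * available (non-BEx chips), else fall back to fixed chip defaults; also
 * read the SR-IOV capability to learn how many VFs the device supports.
 */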
2875 static void be_get_resources(struct be_adapter *adapter)
2876 {
2877         u16 dev_num_vfs;
2878         int pos, status;
2879         bool profile_present = false;
2880
2881         if (!BEx_chip(adapter)) {
2882                 status = be_cmd_get_func_config(adapter);
2883                 if (!status)
2884                         profile_present = true;
2885         }
2886
2887         if (profile_present) {
2888                 /* Sanity fixes for Lancer */
2889                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2890                                               BE_UC_PMAC_COUNT);
2891                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2892                                            BE_NUM_VLANS_SUPPORTED);
2893                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2894                                                BE_MAX_MC);
2895                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2896                                                MAX_TX_QS);
2897                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2898                                                 BE3_MAX_RSS_QS);
2899                 adapter->max_event_queues = min_t(u16,
2900                                                   adapter->max_event_queues,
2901                                                   BE3_MAX_RSS_QS);
2902
2903                 if (adapter->max_rss_queues &&
2904                     adapter->max_rss_queues == adapter->max_rx_queues)
2905                         adapter->max_rss_queues -= 1;
2906
2907                 if (adapter->max_event_queues < adapter->max_rss_queues)
2908                         adapter->max_rss_queues = adapter->max_event_queues;
2909
2910         } else {
2911                 if (be_physfn(adapter))
2912                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2913                 else
2914                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2915
2916                 if (adapter->function_mode & FLEX10_MODE)
2917                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2918                 else
2919                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2920
2921                 adapter->max_mcast_mac = BE_MAX_MC;
2922                 adapter->max_tx_queues = MAX_TX_QS;
2923                 adapter->max_rss_queues = (adapter->be3_native) ?
2924                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2925                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2926
2927                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2928                                         BE_IF_FLAGS_BROADCAST |
2929                                         BE_IF_FLAGS_MULTICAST |
2930                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2931                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2932                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2933                                         BE_IF_FLAGS_PROMISCUOUS;
2934
2935                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2936                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2937         }
2938
2939         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2940         if (pos) {
2941                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2942                                      &dev_num_vfs);
2943                 if (BE3_chip(adapter))
2944                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2945                 adapter->dev_num_vfs = dev_num_vfs;
2946         }
2947 }
2948
2949 /* Routine to query per function resource limits */
2950 static int be_get_config(struct be_adapter *adapter)
2951 {
2952         int status;
2953
2954         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2955                                      &adapter->function_mode,
2956                                      &adapter->function_caps);
2957         if (status)
2958                 goto err;
2959
2960         be_get_resources(adapter);
2961
2962         /* primary mac needs 1 pmac entry */
2963         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2964                                    sizeof(u32), GFP_KERNEL);
2965         if (!adapter->pmac_id) {
2966                 status = -ENOMEM;
2967                 goto err;
2968         }
2969
2970 err:
2971         return status;
2972 }
2973
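/* Bring up the function: query FW config, create EQs/CQs/MCC queues, create
 * the interface and program its MAC, then set up TX queues, flow control
 * and, on a PF with num_vfs set, the VFs.
 */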
2974 static int be_setup(struct be_adapter *adapter)
2975 {
2976         struct device *dev = &adapter->pdev->dev;
2977         u32 en_flags;
2978         u32 tx_fc, rx_fc;
2979         int status;
2980         u8 mac[ETH_ALEN];
2981         bool active_mac;
2982
2983         be_setup_init(adapter);
2984
2985         if (!lancer_chip(adapter))
2986                 be_cmd_req_native_mode(adapter);
2987
2988         status = be_get_config(adapter);
2989         if (status)
2990                 goto err;
2991
2992         be_msix_enable(adapter);
2993
2994         status = be_evt_queues_create(adapter);
2995         if (status)
2996                 goto err;
2997
2998         status = be_tx_cqs_create(adapter);
2999         if (status)
3000                 goto err;
3001
3002         status = be_rx_cqs_create(adapter);
3003         if (status)
3004                 goto err;
3005
3006         status = be_mcc_queues_create(adapter);
3007         if (status)
3008                 goto err;
3009
3010         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3011         /* In UMC mode FW does not return right privileges.
3012          * Override with correct privilege equivalent to PF.
3013          */
3014         if (be_is_mc(adapter))
3015                 adapter->cmd_privileges = MAX_PRIVILEGES;
3016
3017         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3018                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3019
3020         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3021                 en_flags |= BE_IF_FLAGS_RSS;
3022
3023         en_flags &= adapter->if_cap_flags;
3024
3025         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3026                                   &adapter->if_handle, 0);
3027         if (status != 0)
3028                 goto err;
3029
3030         memset(mac, 0, ETH_ALEN);
3031         active_mac = false;
3032         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3033                                  &active_mac, &adapter->pmac_id[0]);
3034         if (status != 0)
3035                 goto err;
3036
3037         if (!active_mac) {
3038                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3039                                          &adapter->pmac_id[0], 0);
3040                 if (status != 0)
3041                         goto err;
3042         }
3043
3044         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3045                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3046                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3047         }
3048
3049         status = be_tx_qs_create(adapter);
3050         if (status)
3051                 goto err;
3052
3053         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3054
3055         if (adapter->vlans_added)
3056                 be_vid_config(adapter);
3057
3058         be_set_rx_mode(adapter->netdev);
3059
3060         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3061
3062         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3063                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3064                                         adapter->rx_fc);
3065
3066         if (be_physfn(adapter) && num_vfs) {
3067                 if (adapter->dev_num_vfs)
3068                         be_vf_setup(adapter);
3069                 else
3070                         dev_warn(dev, "device doesn't support SRIOV\n");
3071         }
3072
3073         status = be_cmd_get_phy_info(adapter);
3074         if (!status && be_pause_supported(adapter))
3075                 adapter->phy.fc_autoneg = 1;
3076
3077         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3078         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3079         return 0;
3080 err:
3081         be_clear(adapter);
3082         return status;
3083 }
3084
3085 #ifdef CONFIG_NET_POLL_CONTROLLER
3086 static void be_netpoll(struct net_device *netdev)
3087 {
3088         struct be_adapter *adapter = netdev_priv(netdev);
3089         struct be_eq_obj *eqo;
3090         int i;
3091
3092         for_all_evt_queues(adapter, eqo, i) {
3093                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3094                 napi_schedule(&eqo->napi);
3095         }
3098 }
3099 #endif
3100
3101 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3102 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3103
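/* Decide whether the boot (redboot) section must be flashed by comparing
 * the CRC of the new image with the CRC currently stored in flash.
 */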
3104 static bool be_flash_redboot(struct be_adapter *adapter,
3105                         const u8 *p, u32 img_start, int image_size,
3106                         int hdr_size)
3107 {
3108         u32 crc_offset;
3109         u8 flashed_crc[4];
3110         int status;
3111
3112         crc_offset = hdr_size + img_start + image_size - 4;
3113
3114         p += crc_offset;
3115
3116         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3117                         (image_size - 4));
3118         if (status) {
3119                 dev_err(&adapter->pdev->dev,
3120                         "could not get CRC from flash, not flashing redboot\n");
3121                 return false;
3122         }
3123
3124         /* update redboot only if the CRC does not match */
3125         return memcmp(flashed_crc, p, 4) != 0;
3129 }
3130
3131 static bool phy_flashing_required(struct be_adapter *adapter)
3132 {
3133         return (adapter->phy.phy_type == TN_8022 &&
3134                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3135 }
3136
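/* Check if a component of the given type exists in the UFI's flash section
 * table (BE2 uses the gen2 layout of this table).
 */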
3137 static bool is_comp_in_ufi(struct be_adapter *adapter,
3138                            struct flash_section_info *fsec, int type)
3139 {
3140         int i = 0, img_type = 0;
3141         struct flash_section_info_g2 *fsec_g2 = NULL;
3142
3143         if (BE2_chip(adapter))
3144                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3145
3146         for (i = 0; i < MAX_FLASH_COMP; i++) {
3147                 if (fsec_g2)
3148                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3149                 else
3150                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3151
3152                 if (img_type == type)
3153                         return true;
3154         }
3155         return false;
3157 }
3158
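/* Locate the flash section info table in the UFI image by scanning for the
 * flash cookie.
 */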
3159 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3160                                                 int header_size,
3161                                                 const struct firmware *fw)
3162 {
3163         struct flash_section_info *fsec = NULL;
3164         const u8 *p = fw->data;
3165
3166         p += header_size;
3167         while (p < (fw->data + fw->size)) {
3168                 fsec = (struct flash_section_info *)p;
3169                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3170                         return fsec;
3171                 p += 32;
3172         }
3173         return NULL;
3174 }
3175
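/* Write an image to the flash ROM in 32KB chunks: intermediate chunks are
 * issued as SAVE ops; the final chunk triggers the actual FLASH op.
 */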
3176 static int be_flash(struct be_adapter *adapter, const u8 *img,
3177                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3178 {
3179         u32 total_bytes = 0, flash_op, num_bytes = 0;
3180         int status = 0;
3181         struct be_cmd_write_flashrom *req = flash_cmd->va;
3182
3183         total_bytes = img_size;
3184         while (total_bytes) {
3185                 num_bytes = min_t(u32, 32*1024, total_bytes);
3186
3187                 total_bytes -= num_bytes;
3188
3189                 if (!total_bytes) {
3190                         if (optype == OPTYPE_PHY_FW)
3191                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3192                         else
3193                                 flash_op = FLASHROM_OPER_FLASH;
3194                 } else {
3195                         if (optype == OPTYPE_PHY_FW)
3196                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3197                         else
3198                                 flash_op = FLASHROM_OPER_SAVE;
3199                 }
3200
3201                 memcpy(req->data_buf, img, num_bytes);
3202                 img += num_bytes;
3203                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3204                                                 flash_op, num_bytes);
3205                 if (status) {
3206                         if (status == ILLEGAL_IOCTL_REQ &&
3207                             optype == OPTYPE_PHY_FW)
3208                                 break;
3209                         dev_err(&adapter->pdev->dev,
3210                                 "cmd to write to flash rom failed.\n");
3211                         return status;
3212                 }
3213         }
3214         return 0;
3215 }
3216
3217 /* For BE2 and BE3 */
3218 static int be_flash_BEx(struct be_adapter *adapter,
3219                          const struct firmware *fw,
3220                          struct be_dma_mem *flash_cmd,
3221                          int num_of_images)
3223 {
3224         int status = 0, i, filehdr_size = 0;
3225         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3226         const u8 *p = fw->data;
3227         const struct flash_comp *pflashcomp;
3228         int num_comp, redboot;
3229         struct flash_section_info *fsec = NULL;
3230
3231         static const struct flash_comp gen3_flash_types[] = {
3232                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3233                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3234                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3235                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3236                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3237                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3238                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3239                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3240                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3241                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3242                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3243                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3244                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3245                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3246                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3247                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3248                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3249                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3250                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3251                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3252         };
3253
3254         static const struct flash_comp gen2_flash_types[] = {
3255                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3256                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3257                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3258                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3259                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3260                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3261                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3262                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3263                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3264                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3265                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3266                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3267                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3268                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3269                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3270                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3271         };
3272
3273         if (BE3_chip(adapter)) {
3274                 pflashcomp = gen3_flash_types;
3275                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3276                 num_comp = ARRAY_SIZE(gen3_flash_types);
3277         } else {
3278                 pflashcomp = gen2_flash_types;
3279                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3280                 num_comp = ARRAY_SIZE(gen2_flash_types);
3281         }
3282
3283         /* Get flash section info */
3284         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3285         if (!fsec) {
3286                 dev_err(&adapter->pdev->dev,
3287                         "Invalid cookie. UFI corrupted?\n");
3288                 return -EINVAL;
3289         }
3290         for (i = 0; i < num_comp; i++) {
3291                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3292                         continue;
3293
3294                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3295                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3296                         continue;
3297
3298                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3299                     !phy_flashing_required(adapter))
3300                         continue;
3301
3302                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3303                         redboot = be_flash_redboot(adapter, fw->data,
3304                                 pflashcomp[i].offset, pflashcomp[i].size,
3305                                 filehdr_size + img_hdrs_size);
3306                         if (!redboot)
3307                                 continue;
3308                 }
3309
3310                 p = fw->data;
3311                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3312                 if (p + pflashcomp[i].size > fw->data + fw->size)
3313                         return -EINVAL;
3314
3315                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3316                                         pflashcomp[i].size);
3317                 if (status) {
3318                         dev_err(&adapter->pdev->dev,
3319                                 "Flashing section type %d failed.\n",
3320                                 pflashcomp[i].img_type);
3321                         return status;
3322                 }
3323         }
3324         return 0;
3325 }
3326
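/* Flash a Skyhawk UFI: walk the flash section table and flash every
 * recognized component type at its recorded offset.
 */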
3327 static int be_flash_skyhawk(struct be_adapter *adapter,
3328                 const struct firmware *fw,
3329                 struct be_dma_mem *flash_cmd, int num_of_images)
3330 {
3331         int status = 0, i, filehdr_size = 0;
3332         int img_offset, img_size, img_optype, redboot;
3333         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3334         const u8 *p = fw->data;
3335         struct flash_section_info *fsec = NULL;
3336
3337         filehdr_size = sizeof(struct flash_file_hdr_g3);
3338         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3339         if (!fsec) {
3340                 dev_err(&adapter->pdev->dev,
3341                         "Invalid cookie. UFI corrupted?\n");
3342                 return -EINVAL;
3343         }
3344
3345         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3346                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3347                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3348
3349                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3350                 case IMAGE_FIRMWARE_iSCSI:
3351                         img_optype = OPTYPE_ISCSI_ACTIVE;
3352                         break;
3353                 case IMAGE_BOOT_CODE:
3354                         img_optype = OPTYPE_REDBOOT;
3355                         break;
3356                 case IMAGE_OPTION_ROM_ISCSI:
3357                         img_optype = OPTYPE_BIOS;
3358                         break;
3359                 case IMAGE_OPTION_ROM_PXE:
3360                         img_optype = OPTYPE_PXE_BIOS;
3361                         break;
3362                 case IMAGE_OPTION_ROM_FCoE:
3363                         img_optype = OPTYPE_FCOE_BIOS;
3364                         break;
3365                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3366                         img_optype = OPTYPE_ISCSI_BACKUP;
3367                         break;
3368                 case IMAGE_NCSI:
3369                         img_optype = OPTYPE_NCSI_FW;
3370                         break;
3371                 default:
3372                         continue;
3373                 }
3374
3375                 if (img_optype == OPTYPE_REDBOOT) {
3376                         redboot = be_flash_redboot(adapter, fw->data,
3377                                         img_offset, img_size,
3378                                         filehdr_size + img_hdrs_size);
3379                         if (!redboot)
3380                                 continue;
3381                 }
3382
3383                 p = fw->data;
3384                 p += filehdr_size + img_offset + img_hdrs_size;
3385                 if (p + img_size > fw->data + fw->size)
3386                         return -EINVAL;
3387
3388                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3389                 if (status) {
3390                         dev_err(&adapter->pdev->dev,
3391                                 "Flashing section type %d failed.\n",
3392                                 le32_to_cpu(fsec->fsec_entry[i].type));
3393                         return status;
3394                 }
3395         }
3396         return 0;
3397 }
3398
3399 static int lancer_wait_idle(struct be_adapter *adapter)
3400 {
3401 #define SLIPORT_IDLE_TIMEOUT 30
3402         u32 reg_val;
3403         int status = 0, i;
3404
3405         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3406                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3407                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3408                         break;
3409
3410                 ssleep(1);
3411         }
3412
3413         if (i == SLIPORT_IDLE_TIMEOUT)
3414                 status = -ETIMEDOUT;
3415
3416         return status;
3417 }
3418
3419 static int lancer_fw_reset(struct be_adapter *adapter)
3420 {
3421         int status = 0;
3422
3423         status = lancer_wait_idle(adapter);
3424         if (status)
3425                 return status;
3426
3427         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3428                   PHYSDEV_CONTROL_OFFSET);
3429
3430         return status;
3431 }
3432
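/* Download FW to Lancer with write-object cmds in 32KB chunks; a final
 * zero-length write commits the image, after which the FW may need a reset.
 */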
3433 static int lancer_fw_download(struct be_adapter *adapter,
3434                                 const struct firmware *fw)
3435 {
3436 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3437 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3438         struct be_dma_mem flash_cmd;
3439         const u8 *data_ptr = NULL;
3440         u8 *dest_image_ptr = NULL;
3441         size_t image_size = 0;
3442         u32 chunk_size = 0;
3443         u32 data_written = 0;
3444         u32 offset = 0;
3445         int status = 0;
3446         u8 add_status = 0;
3447         u8 change_status;
3448
3449         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3450                 dev_err(&adapter->pdev->dev,
3451                         "FW image is not properly aligned; "
3452                         "length must be 4-byte aligned\n");
3453                 status = -EINVAL;
3454                 goto lancer_fw_exit;
3455         }
3456
3457         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3458                                 + LANCER_FW_DOWNLOAD_CHUNK;
3459         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3460                                                 &flash_cmd.dma, GFP_KERNEL);
3461         if (!flash_cmd.va) {
3462                 status = -ENOMEM;
3463                 dev_err(&adapter->pdev->dev,
3464                         "Memory allocation failure while flashing\n");
3465                 goto lancer_fw_exit;
3466         }
3467
3468         dest_image_ptr = flash_cmd.va +
3469                                 sizeof(struct lancer_cmd_req_write_object);
3470         image_size = fw->size;
3471         data_ptr = fw->data;
3472
3473         while (image_size) {
3474                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3475
3476                 /* Copy the image chunk content. */
3477                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3478
3479                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3480                                                  chunk_size, offset,
3481                                                  LANCER_FW_DOWNLOAD_LOCATION,
3482                                                  &data_written, &change_status,
3483                                                  &add_status);
3484                 if (status)
3485                         break;
3486
3487                 offset += data_written;
3488                 data_ptr += data_written;
3489                 image_size -= data_written;
3490         }
3491
3492         if (!status) {
3493                 /* Commit the FW written */
3494                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3495                                                  0, offset,
3496                                                  LANCER_FW_DOWNLOAD_LOCATION,
3497                                                  &data_written, &change_status,
3498                                                  &add_status);
3499         }
3500
3501         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3502                                 flash_cmd.dma);
3503         if (status) {
3504                 dev_err(&adapter->pdev->dev,
3505                         "Firmware load error. "
3506                         "Status code: 0x%x Additional Status: 0x%x\n",
3507                         status, add_status);
3508                 goto lancer_fw_exit;
3509         }
3510
3511         if (change_status == LANCER_FW_RESET_NEEDED) {
3512                 status = lancer_fw_reset(adapter);
3513                 if (status) {
3514                         dev_err(&adapter->pdev->dev,
3515                                 "Adapter busy for FW reset.\n"
3516                                 "New FW will not be active.\n");
3517                         goto lancer_fw_exit;
3518                 }
3519         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3520                 dev_err(&adapter->pdev->dev,
3521                         "System reboot required for new FW to be active\n");
3523         }
3524
3525         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3526 lancer_fw_exit:
3527         return status;
3528 }
3529
3530 #define UFI_TYPE2               2
3531 #define UFI_TYPE3               3
3532 #define UFI_TYPE4               4
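/* Match the UFI generation (first char of the build string) against the
 * chip generation; flashing is allowed only when they agree.
 */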
3533 static int be_get_ufi_type(struct be_adapter *adapter,
3534                            struct flash_file_hdr_g2 *fhdr)
3535 {
3536         if (fhdr == NULL)
3537                 goto be_get_ufi_exit;
3538
3539         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3540                 return UFI_TYPE4;
3541         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3542                 return UFI_TYPE3;
3543         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3544                 return UFI_TYPE2;
3545
3546 be_get_ufi_exit:
3547         dev_err(&adapter->pdev->dev,
3548                 "UFI and interface are not compatible for flashing\n");
3549         return -EINVAL;
3550 }
3551
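/* Flash a BE2/BE3/Skyhawk UFI after validating that the UFI type matches
 * the chip generation.
 */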
3552 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3553 {
3554         struct flash_file_hdr_g2 *fhdr;
3555         struct flash_file_hdr_g3 *fhdr3;
3556         struct image_hdr *img_hdr_ptr = NULL;
3557         struct be_dma_mem flash_cmd;
3558         const u8 *p;
3559         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3560
3561         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3562         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3563                                           &flash_cmd.dma, GFP_KERNEL);
3564         if (!flash_cmd.va) {
3565                 status = -ENOMEM;
3566                 dev_err(&adapter->pdev->dev,
3567                         "Memory allocation failure while flashing\n");
3568                 goto be_fw_exit;
3569         }
3570
3571         p = fw->data;
3572         fhdr = (struct flash_file_hdr_g2 *)p;
3573
3574         ufi_type = be_get_ufi_type(adapter, fhdr);
3575
3576         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3577         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3578         for (i = 0; i < num_imgs; i++) {
3579                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3580                                 (sizeof(struct flash_file_hdr_g3) +
3581                                  i * sizeof(struct image_hdr)));
3582                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3583                         if (ufi_type == UFI_TYPE4)
3584                                 status = be_flash_skyhawk(adapter, fw,
3585                                                         &flash_cmd, num_imgs);
3586                         else if (ufi_type == UFI_TYPE3)
3587                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3588                                                       num_imgs);
3589                 }
3590         }
3591
3592         if (ufi_type == UFI_TYPE2)
3593                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3594         else if (ufi_type < 0)
3595                 status = ufi_type;
3596
3597         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3598                           flash_cmd.dma);
3599         if (status) {
3600                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3601                 goto be_fw_exit;
3602         }
3603
3604         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3605
3606 be_fw_exit:
3607         return status;
3608 }
3609
3610 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3611 {
3612         const struct firmware *fw;
3613         int status;
3614
3615         if (!netif_running(adapter->netdev)) {
3616                 dev_err(&adapter->pdev->dev,
3617                         "Firmware load not allowed (interface is down)\n");
3618                 return -ENETDOWN;
3619         }
3620
3621         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3622         if (status)
3623                 goto fw_exit;
3624
3625         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3626
3627         if (lancer_chip(adapter))
3628                 status = lancer_fw_download(adapter, fw);
3629         else
3630                 status = be_fw_download(adapter, fw);
3631
3632 fw_exit:
3633         release_firmware(fw);
3634         return status;
3635 }
3636
3637 static const struct net_device_ops be_netdev_ops = {
3638         .ndo_open               = be_open,
3639         .ndo_stop               = be_close,
3640         .ndo_start_xmit         = be_xmit,
3641         .ndo_set_rx_mode        = be_set_rx_mode,
3642         .ndo_set_mac_address    = be_mac_addr_set,
3643         .ndo_change_mtu         = be_change_mtu,
3644         .ndo_get_stats64        = be_get_stats64,
3645         .ndo_validate_addr      = eth_validate_addr,
3646         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3647         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3648         .ndo_set_vf_mac         = be_set_vf_mac,
3649         .ndo_set_vf_vlan        = be_set_vf_vlan,
3650         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3651         .ndo_get_vf_config      = be_get_vf_config,
3652 #ifdef CONFIG_NET_POLL_CONTROLLER
3653         .ndo_poll_controller    = be_netpoll,
3654 #endif
3655 };
3656
3657 static void be_netdev_init(struct net_device *netdev)
3658 {
3659         struct be_adapter *adapter = netdev_priv(netdev);
3660         struct be_eq_obj *eqo;
3661         int i;
3662
3663         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3664                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3665                 NETIF_F_HW_VLAN_TX;
3666         if (be_multi_rxq(adapter))
3667                 netdev->hw_features |= NETIF_F_RXHASH;
3668
3669         netdev->features |= netdev->hw_features |
3670                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3671
3672         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3673                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3674
3675         netdev->priv_flags |= IFF_UNICAST_FLT;
3676
3677         netdev->flags |= IFF_MULTICAST;
3678
3679         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3680
3681         netdev->netdev_ops = &be_netdev_ops;
3682
3683         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3684
3685         for_all_evt_queues(adapter, eqo, i)
3686                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3687 }
3688
3689 static void be_unmap_pci_bars(struct be_adapter *adapter)
3690 {
3691         if (adapter->csr)
3692                 pci_iounmap(adapter->pdev, adapter->csr);
3693         if (adapter->db)
3694                 pci_iounmap(adapter->pdev, adapter->db);
3695 }
3696
3697 static int db_bar(struct be_adapter *adapter)
3698 {
3699         if (lancer_chip(adapter) || !be_physfn(adapter))
3700                 return 0;
3701         else
3702                 return 4;
3703 }
3704
3705 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3706 {
3707         if (skyhawk_chip(adapter)) {
3708                 adapter->roce_db.size = 4096;
3709                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3710                                                               db_bar(adapter));
3711                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3712                                                                db_bar(adapter));
3713         }
3714         return 0;
3715 }
3716
3717 static int be_map_pci_bars(struct be_adapter *adapter)
3718 {
3719         u8 __iomem *addr;
3720         u32 sli_intf;
3721
3722         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3723         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3724                                 SLI_INTF_IF_TYPE_SHIFT;
3725
3726         if (BEx_chip(adapter) && be_physfn(adapter)) {
3727                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3728                 if (adapter->csr == NULL)
3729                         return -ENOMEM;
3730         }
3731
3732         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3733         if (addr == NULL)
3734                 goto pci_map_err;
3735         adapter->db = addr;
3736
3737         be_roce_map_pci_bars(adapter);
3738         return 0;
3739
3740 pci_map_err:
3741         be_unmap_pci_bars(adapter);
3742         return -ENOMEM;
3743 }
3744
3745 static void be_ctrl_cleanup(struct be_adapter *adapter)
3746 {
3747         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3748
3749         be_unmap_pci_bars(adapter);
3750
3751         if (mem->va)
3752                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3753                                   mem->dma);
3754
3755         mem = &adapter->rx_filter;
3756         if (mem->va)
3757                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3758                                   mem->dma);
3759 }
3760
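/* Map the PCI BARs and set up the DMA memory (mailbox, RX-filter cmd) and
 * locks needed for issuing FW cmds.
 */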
3761 static int be_ctrl_init(struct be_adapter *adapter)
3762 {
3763         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3764         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3765         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3766         u32 sli_intf;
3767         int status;
3768
3769         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3770         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3771                                  SLI_INTF_FAMILY_SHIFT;
3772         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3773
3774         status = be_map_pci_bars(adapter);
3775         if (status)
3776                 goto done;
3777
3778         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3779         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3780                                                 mbox_mem_alloc->size,
3781                                                 &mbox_mem_alloc->dma,
3782                                                 GFP_KERNEL);
3783         if (!mbox_mem_alloc->va) {
3784                 status = -ENOMEM;
3785                 goto unmap_pci_bars;
3786         }
3787         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3788         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3789         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3790         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3791
3792         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3793         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
3794                                             rx_filter->size, &rx_filter->dma,
3795                                             GFP_KERNEL);
3796         if (rx_filter->va == NULL) {
3797                 status = -ENOMEM;
3798                 goto free_mbox;
3799         }
3800         mutex_init(&adapter->mbox_lock);
3801         spin_lock_init(&adapter->mcc_lock);
3802         spin_lock_init(&adapter->mcc_cq_lock);
3803
3804         init_completion(&adapter->flash_compl);
3805         pci_save_state(adapter->pdev);
3806         return 0;
3807
3808 free_mbox:
3809         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3810                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3811
3812 unmap_pci_bars:
3813         be_unmap_pci_bars(adapter);
3814
3815 done:
3816         return status;
3817 }
3818
3819 static void be_stats_cleanup(struct be_adapter *adapter)
3820 {
3821         struct be_dma_mem *cmd = &adapter->stats_cmd;
3822
3823         if (cmd->va)
3824                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3825                                   cmd->va, cmd->dma);
3826 }
3827
3828 static int be_stats_init(struct be_adapter *adapter)
3829 {
3830         struct be_dma_mem *cmd = &adapter->stats_cmd;
3831
3832         if (lancer_chip(adapter))
3833                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3834         else if (BE2_chip(adapter))
3835                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3836         else
3837                 /* BE3 and Skyhawk */
3838                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3839
3840         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size,
3841                                       &cmd->dma, GFP_KERNEL);
3842         if (cmd->va == NULL)
3843                 return -ENOMEM;
3845         return 0;
3846 }
3847
3848 static void be_remove(struct pci_dev *pdev)
3849 {
3850         struct be_adapter *adapter = pci_get_drvdata(pdev);
3851
3852         if (!adapter)
3853                 return;
3854
3855         be_roce_dev_remove(adapter);
3856
3857         cancel_delayed_work_sync(&adapter->func_recovery_work);
3858
3859         unregister_netdev(adapter->netdev);
3860
3861         be_clear(adapter);
3862
3863         /* tell fw we're done with firing cmds */
3864         be_cmd_fw_clean(adapter);
3865
3866         be_stats_cleanup(adapter);
3867
3868         be_ctrl_cleanup(adapter);
3869
3870         pci_disable_pcie_error_reporting(pdev);
3871
3872         pci_set_drvdata(pdev, NULL);
3873         pci_release_regions(pdev);
3874         pci_disable_device(pdev);
3875
3876         free_netdev(adapter->netdev);
3877 }
3878
3879 bool be_is_wol_supported(struct be_adapter *adapter)
3880 {
3881         return (adapter->wol_cap & BE_WOL_CAP) &&
3882                !be_is_wol_excluded(adapter);
3883 }
3884
3885 u32 be_get_fw_log_level(struct be_adapter *adapter)
3886 {
3887         struct be_dma_mem extfat_cmd;
3888         struct be_fat_conf_params *cfgs;
3889         int status;
3890         u32 level = 0;
3891         int j;
3892
3893         if (lancer_chip(adapter))
3894                 return 0;
3895
3896         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3897         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3898         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3899                                              &extfat_cmd.dma);
3900
3901         if (!extfat_cmd.va) {
3902                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3903                         __func__);
3904                 goto err;
3905         }
3906
3907         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3908         if (!status) {
3909                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3910                                                 sizeof(struct be_cmd_resp_hdr));
3911                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3912                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3913                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3914                 }
3915         }
3916         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3917                             extfat_cmd.dma);
3918 err:
3919         return level;
3920 }
3921
3922 static int be_get_initial_config(struct be_adapter *adapter)
3923 {
3924         int status;
3925         u32 level;
3926
3927         status = be_cmd_get_cntl_attributes(adapter);
3928         if (status)
3929                 return status;
3930
3931         status = be_cmd_get_acpi_wol_cap(adapter);
3932         if (status) {
3933                 /* In case of a failure to get WOL capabilities,
3934                  * check the exclusion list to determine WOL capability */
3935                 if (!be_is_wol_excluded(adapter))
3936                         adapter->wol_cap |= BE_WOL_CAP;
3937         }
3938
3939         if (be_is_wol_supported(adapter))
3940                 adapter->wol = true;
3941
3942         /* Must be a power of 2 or else MODULO will BUG_ON */
3943         adapter->be_get_temp_freq = 64;
3944
3945         level = be_get_fw_log_level(adapter);
3946         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3947
3948         return 0;
3949 }
3950
3951 static int lancer_recover_func(struct be_adapter *adapter)
3952 {
3953         int status;
3954
3955         status = lancer_test_and_set_rdy_state(adapter);
3956         if (status)
3957                 goto err;
3958
3959         if (netif_running(adapter->netdev))
3960                 be_close(adapter->netdev);
3961
3962         be_clear(adapter);
3963
3964         adapter->hw_error = false;
3965         adapter->fw_timeout = false;
3966
3967         status = be_setup(adapter);
3968         if (status)
3969                 goto err;
3970
3971         if (netif_running(adapter->netdev)) {
3972                 status = be_open(adapter->netdev);
3973                 if (status)
3974                         goto err;
3975         }
3976
3977         dev_info(&adapter->pdev->dev,
3978                  "Adapter SLIPORT recovery succeeded\n");
3979         return 0;
3980 err:
3981         if (adapter->eeh_error)
3982                 dev_err(&adapter->pdev->dev,
3983                         "Adapter SLIPORT recovery failed\n");
3984
3985         return status;
3986 }
3987
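/* Runs every second to detect HW errors and, on Lancer, attempt a SLIPORT
 * recovery of the function.
 */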
3988 static void be_func_recovery_task(struct work_struct *work)
3989 {
3990         struct be_adapter *adapter =
3991                 container_of(work, struct be_adapter, func_recovery_work.work);
3992         int status;
3993
3994         be_detect_error(adapter);
3995
3996         if (adapter->hw_error && lancer_chip(adapter)) {
3998                 if (adapter->eeh_error)
3999                         goto out;
4000
4001                 rtnl_lock();
4002                 netif_device_detach(adapter->netdev);
4003                 rtnl_unlock();
4004
4005                 status = lancer_recover_func(adapter);
4006
4007                 if (!status)
4008                         netif_device_attach(adapter->netdev);
4009         }
4010
4011 out:
4012         schedule_delayed_work(&adapter->func_recovery_work,
4013                               msecs_to_jiffies(1000));
4014 }
4015
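/* Periodic (1s) housekeeping: reap MCC completions while the interface is
 * down, fetch stats and die temperature, replenish starved RX queues and
 * re-tune EQ delays.
 */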
4016 static void be_worker(struct work_struct *work)
4017 {
4018         struct be_adapter *adapter =
4019                 container_of(work, struct be_adapter, work.work);
4020         struct be_rx_obj *rxo;
4021         struct be_eq_obj *eqo;
4022         int i;
4023
4024         /* when interrupts are not yet enabled, just reap any pending
4025          * mcc completions */
4026         if (!netif_running(adapter->netdev)) {
4027                 local_bh_disable();
4028                 be_process_mcc(adapter);
4029                 local_bh_enable();
4030                 goto reschedule;
4031         }
4032
4033         if (!adapter->stats_cmd_sent) {
4034                 if (lancer_chip(adapter))
4035                         lancer_cmd_get_pport_stats(adapter,
4036                                                 &adapter->stats_cmd);
4037                 else
4038                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4039         }
4040
4041         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4042                 be_cmd_get_die_temperature(adapter);
4043
4044         for_all_rx_queues(adapter, rxo, i) {
4045                 if (rxo->rx_post_starved) {
4046                         rxo->rx_post_starved = false;
4047                         be_post_rx_frags(rxo, GFP_KERNEL);
4048                 }
4049         }
4050
4051         for_all_evt_queues(adapter, eqo, i)
4052                 be_eqd_update(adapter, eqo);
4053
4054 reschedule:
4055         adapter->work_counter++;
4056         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4057 }
4058
4059 static bool be_reset_required(struct be_adapter *adapter)
4060 {
4061         return be_find_vfs(adapter, ENABLED) <= 0;
4062 }
4063
4064 static char *mc_name(struct be_adapter *adapter)
4065 {
4066         if (adapter->function_mode & FLEX10_MODE)
4067                 return "FLEX10";
4068         else if (adapter->function_mode & VNIC_MODE)
4069                 return "vNIC";
4070         else if (adapter->function_mode & UMC_ENABLED)
4071                 return "UMC";
4072         else
4073                 return "";
4074 }
4075
4076 static inline char *func_name(struct be_adapter *adapter)
4077 {
4078         return be_physfn(adapter) ? "PF" : "VF";
4079 }
4080
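/* One-time PCI probe: map BARs, sync with FW, reset the function if
 * required, then set up the adapter and register the netdev.
 */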
4081 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4082 {
4083         int status = 0;
4084         struct be_adapter *adapter;
4085         struct net_device *netdev;
4086         char port_name;
4087
4088         status = pci_enable_device(pdev);
4089         if (status)
4090                 goto do_none;
4091
4092         status = pci_request_regions(pdev, DRV_NAME);
4093         if (status)
4094                 goto disable_dev;
4095         pci_set_master(pdev);
4096
4097         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4098         if (netdev == NULL) {
4099                 status = -ENOMEM;
4100                 goto rel_reg;
4101         }
4102         adapter = netdev_priv(netdev);
4103         adapter->pdev = pdev;
4104         pci_set_drvdata(pdev, adapter);
4105         adapter->netdev = netdev;
4106         SET_NETDEV_DEV(netdev, &pdev->dev);
4107
4108         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4109         if (!status) {
4110                 netdev->features |= NETIF_F_HIGHDMA;
4111         } else {
4112                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4113                 if (status) {
4114                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4115                         goto free_netdev;
4116                 }
4117         }
4118
4119         status = pci_enable_pcie_error_reporting(pdev);
4120         if (status)
4121                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4122
4123         status = be_ctrl_init(adapter);
4124         if (status)
4125                 goto free_netdev;
4126
4127         /* sync up with fw's ready state */
4128         if (be_physfn(adapter)) {
4129                 status = be_fw_wait_ready(adapter);
4130                 if (status)
4131                         goto ctrl_clean;
4132         }
4133
4134         /* tell fw we're ready to fire cmds */
4135         status = be_cmd_fw_init(adapter);
4136         if (status)
4137                 goto ctrl_clean;
4138
4139         if (be_reset_required(adapter)) {
4140                 status = be_cmd_reset_function(adapter);
4141                 if (status)
4142                         goto ctrl_clean;
4143         }
4144
4145         /* The INTR bit may be set in the card when probed by a kdump kernel
4146          * after a crash.
4147          */
4148         if (!lancer_chip(adapter))
4149                 be_intr_set(adapter, false);
4150
4151         status = be_stats_init(adapter);
4152         if (status)
4153                 goto ctrl_clean;
4154
4155         status = be_get_initial_config(adapter);
4156         if (status)
4157                 goto stats_clean;
4158
4159         INIT_DELAYED_WORK(&adapter->work, be_worker);
4160         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4161         adapter->rx_fc = adapter->tx_fc = true;
4162
4163         status = be_setup(adapter);
4164         if (status)
4165                 goto stats_clean;
4166
4167         be_netdev_init(netdev);
4168         status = register_netdev(netdev);
4169         if (status != 0)
4170                 goto unsetup;
4171
4172         be_roce_dev_add(adapter);
4173
4174         schedule_delayed_work(&adapter->func_recovery_work,
4175                               msecs_to_jiffies(1000));
4176
4177         be_cmd_query_port_name(adapter, &port_name);
4178
4179         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4180                  func_name(adapter), mc_name(adapter), port_name);
4181
4182         return 0;
4183
4184 unsetup:
4185         be_clear(adapter);
4186 stats_clean:
4187         be_stats_cleanup(adapter);
4188 ctrl_clean:
4189         be_ctrl_cleanup(adapter);
4190 free_netdev:
4191         free_netdev(netdev);
4192         pci_set_drvdata(pdev, NULL);
4193 rel_reg:
4194         pci_release_regions(pdev);
4195 disable_dev:
4196         pci_disable_device(pdev);
4197 do_none:
4198         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4199         return status;
4200 }
4201
4202 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4203 {
4204         struct be_adapter *adapter = pci_get_drvdata(pdev);
4205         struct net_device *netdev = adapter->netdev;
4206
4207         if (adapter->wol)
4208                 be_setup_wol(adapter, true);
4209
4210         cancel_delayed_work_sync(&adapter->func_recovery_work);
4211
4212         netif_device_detach(netdev);
4213         if (netif_running(netdev)) {
4214                 rtnl_lock();
4215                 be_close(netdev);
4216                 rtnl_unlock();
4217         }
4218         be_clear(adapter);
4219
4220         pci_save_state(pdev);
4221         pci_disable_device(pdev);
4222         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4223         return 0;
4224 }
4225
4226 static int be_resume(struct pci_dev *pdev)
4227 {
4228         int status = 0;
4229         struct be_adapter *adapter = pci_get_drvdata(pdev);
4230         struct net_device *netdev = adapter->netdev;
4231
4232         netif_device_detach(netdev);
4233
4234         status = pci_enable_device(pdev);
4235         if (status)
4236                 return status;
4237
4238         pci_set_power_state(pdev, PCI_D0);
4239         pci_restore_state(pdev);
4240
4241         /* tell fw we're ready to fire cmds */
4242         status = be_cmd_fw_init(adapter);
4243         if (status)
4244                 return status;
4245
4246         be_setup(adapter);
4247         if (netif_running(netdev)) {
4248                 rtnl_lock();
4249                 be_open(netdev);
4250                 rtnl_unlock();
4251         }
4252
4253         schedule_delayed_work(&adapter->func_recovery_work,
4254                               msecs_to_jiffies(1000));
4255         netif_device_attach(netdev);
4256
4257         if (adapter->wol)
4258                 be_setup_wol(adapter, false);
4259
4260         return 0;
4261 }
4262
4263 /*
4264  * An FLR will stop BE from DMAing any data.
4265  */
4266 static void be_shutdown(struct pci_dev *pdev)
4267 {
4268         struct be_adapter *adapter = pci_get_drvdata(pdev);
4269
4270         if (!adapter)
4271                 return;
4272
4273         cancel_delayed_work_sync(&adapter->work);
4274         cancel_delayed_work_sync(&adapter->func_recovery_work);
4275
4276         netif_device_detach(adapter->netdev);
4277
4278         be_cmd_reset_function(adapter);
4279
4280         pci_disable_device(pdev);
4281 }
4282
4283 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4284                                 pci_channel_state_t state)
4285 {
4286         struct be_adapter *adapter = pci_get_drvdata(pdev);
4287         struct net_device *netdev = adapter->netdev;
4288
4289         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4290
4291         adapter->eeh_error = true;
4292
4293         cancel_delayed_work_sync(&adapter->func_recovery_work);
4294
4295         rtnl_lock();
4296         netif_device_detach(netdev);
4297         rtnl_unlock();
4298
4299         if (netif_running(netdev)) {
4300                 rtnl_lock();
4301                 be_close(netdev);
4302                 rtnl_unlock();
4303         }
4304         be_clear(adapter);
4305
4306         if (state == pci_channel_io_perm_failure)
4307                 return PCI_ERS_RESULT_DISCONNECT;
4308
4309         pci_disable_device(pdev);
4310
4311         /* The error could cause the FW to trigger a flash debug dump.
4312          * Resetting the card while flash dump is in progress
4313          * can cause it not to recover; wait for it to finish.
4314          * Wait only for first function as it is needed only once per
4315          * adapter.
4316          */
4317         if (pdev->devfn == 0)
4318                 ssleep(30);
4319
4320         return PCI_ERS_RESULT_NEED_RESET;
4321 }
4322
4323 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4324 {
4325         struct be_adapter *adapter = pci_get_drvdata(pdev);
4326         int status;
4327
4328         dev_info(&adapter->pdev->dev, "EEH reset\n");
4329         be_clear_all_error(adapter);
4330
4331         status = pci_enable_device(pdev);
4332         if (status)
4333                 return PCI_ERS_RESULT_DISCONNECT;
4334
4335         pci_set_master(pdev);
4336         pci_set_power_state(pdev, PCI_D0);
4337         pci_restore_state(pdev);
4338
4339         /* Check if card is ok and fw is ready */
4340         dev_info(&adapter->pdev->dev,
4341                  "Waiting for FW to be ready after EEH reset\n");
4342         status = be_fw_wait_ready(adapter);
4343         if (status)
4344                 return PCI_ERS_RESULT_DISCONNECT;
4345
4346         pci_cleanup_aer_uncorrect_error_status(pdev);
4347         return PCI_ERS_RESULT_RECOVERED;
4348 }
4349
4350 static void be_eeh_resume(struct pci_dev *pdev)
4351 {
4352         int status = 0;
4353         struct be_adapter *adapter = pci_get_drvdata(pdev);
4354         struct net_device *netdev = adapter->netdev;
4355
4356         dev_info(&adapter->pdev->dev, "EEH resume\n");
4357
4358         pci_save_state(pdev);
4359
4360         /* tell fw we're ready to fire cmds */
4361         status = be_cmd_fw_init(adapter);
4362         if (status)
4363                 goto err;
4364
4365         status = be_cmd_reset_function(adapter);
4366         if (status)
4367                 goto err;
4368
4369         status = be_setup(adapter);
4370         if (status)
4371                 goto err;
4372
4373         if (netif_running(netdev)) {
4374                 status = be_open(netdev);
4375                 if (status)
4376                         goto err;
4377         }
4378
4379         schedule_delayed_work(&adapter->func_recovery_work,
4380                               msecs_to_jiffies(1000));
4381         netif_device_attach(netdev);
4382         return;
4383 err:
4384         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4385 }
4386
4387 static const struct pci_error_handlers be_eeh_handlers = {
4388         .error_detected = be_eeh_err_detected,
4389         .slot_reset = be_eeh_reset,
4390         .resume = be_eeh_resume,
4391 };
4392
4393 static struct pci_driver be_driver = {
4394         .name = DRV_NAME,
4395         .id_table = be_dev_ids,
4396         .probe = be_probe,
4397         .remove = be_remove,
4398         .suspend = be_suspend,
4399         .resume = be_resume,
4400         .shutdown = be_shutdown,
4401         .err_handler = &be_eeh_handlers
4402 };
4403
4404 static int __init be_init_module(void)
4405 {
4406         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4407             rx_frag_size != 2048) {
4408                 pr_warn(DRV_NAME
4409                         " : Module param rx_frag_size must be 2048/4096/8192; using 2048\n");
4411                 rx_frag_size = 2048;
4412         }
4413
4414         return pci_register_driver(&be_driver);
4415 }
4416 module_init(be_init_module);
4417
4418 static void __exit be_exit_module(void)
4419 {
4420         pci_unregister_driver(&be_driver);
4421 }
4422 module_exit(be_exit_module);