drivers/net/ethernet/qlogic/qede/qede_main.c
/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	int rc;

	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
		struct qed_update_vport_params params;

		memset(&params, 0, sizeof(params));
		params.vport_id = 0;
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, &params);
	}

	return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};

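/* qed callback: adopt a MAC address dictated by the lower qed layer,
 * keeping the netdev address and the driver's primary_mac cache in sync.
 */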
static void qede_force_mac(void *dev, u8 *mac)
{
	struct qede_dev *edev = dev;

	ether_addr_copy(edev->ndev->dev_addr, mac);
	ether_addr_copy(edev->primary_mac, mac);
}

static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
};

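/* netdev notifier: propagate interface renames of qede devices to the
 * qed core so its identification string stays current.
 */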
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	/* Currently only support name change */
	if (event != NETDEV_CHANGENAME)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	/* Notify qed of the name change */
	if (!edev->ops || !edev->ops->common)
		goto done;
	edev->ops->common->set_id(edev->cdev, edev->ndev->name,
				  "qede");

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_notice("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register the notifier before pci ops, since we might miss
	 * an interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	pr_notice("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */

/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq,
			    int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd,
				    bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
					  qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}

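/* Classify an skb for transmission: returns XMIT_PLAIN when no checksum
 * offload is requested, otherwise XMIT_L4_CSUM possibly OR'd with
 * XMIT_ENC (encapsulated) and XMIT_LSO (GSO). *ipv6_ext is set for IPv6
 * packets carrying extension headers.
 */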
static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb,
			  int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation)
		rc |= XMIT_ENC;

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

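/* Fill the 2nd/3rd BD fields needed for IPv6 packets with extension
 * headers: the L4 header offset, pseudo-checksum mode, a UDP indication
 * and (when a 3rd BD exists) the TCP header length.
 */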
static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag,
			  struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag),
				   DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

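/* Total header length in bytes, from the start of the packet to the end
 * of the TCP header; inner headers are used for encapsulated packets.
 */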
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
			     u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

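/* Ring the TX doorbell, publishing the new BD producer to the device. */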
static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BD data is written before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
			    struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
		(MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(edev, skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			first_bd->data.bitfields |=
			    1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
		}

		/* If the packet is IPv6 with extension headers, indicate that
		 * to FW and pass a few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header(s).
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		first_bd->data.bitfields |=
		    (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
		    ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	}

	/* Handle fragmented skb */
	/* Special handling for frags placed inside the 2nd and 3rd BDs */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

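/* Non-zero when the hardware consumer index differs from the driver's
 * chain consumer, i.e. there are TX completions waiting to be reclaimed.
 */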
int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

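/* TX completion handler: reclaim completed packets, update BQL accounting
 * and re-wake the TX queue if it was stopped while BDs were exhausted.
 */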
static int qede_tx_int(struct qede_dev *edev,
		       struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;
	return false;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses the buffer (from an offset) from the consumer
 * index to the producer index in the BD ring.
 */
static inline void qede_reuse_page(struct qede_dev *edev,
				   struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures, reuse buffers from the consumer index
 * to repost buffers for firmware.
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			     struct qede_dev *edev, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(edev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
					 struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(edev, rxq, curr_cons);
	}

	return 0;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

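/* Extract the RSS hash from the CQE and translate the hardware hash type
 * into the stack's pkt_hash_types, provided NETIF_F_RXHASH is enabled.
 */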
static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash,
			   enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

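/* Apply the QEDE_CSUM_* verdict to the skb; csum_level 1 additionally
 * marks a validated inner (tunnel) checksum.
 */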
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
		skb->csum_level = 1;
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb,
				    u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

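/* Seed gso_type and gso_size from the TPA-start CQE so the aggregated
 * skb can later be GRO-completed and remain forwardable.
 */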
static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
					cqe->header_len;
}

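/* Attach the RX buffer at the current consumer index to the aggregation
 * skb as a page fragment; if the ring slot cannot be replenished, the
 * aggregation is marked as errored and the buffer is recycled.
 */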
static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index,
			      u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, edev, 1);
	return -ENOMEM;
}

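/* Begin a TPA aggregation: repost the pre-allocated replacement buffer to
 * the BD ring, stash the start buffer and its mapping, and allocate the
 * skb that will accumulate the aggregated fragments.
 */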
static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
	dma_addr_t mapping = tpa_info->replace_buf_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;
	enum pkt_hash_types rxhash_type;
	u32 rxhash;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Use the pre-allocated replacement buffer - we can't release the agg.
	 * start until it's over and we don't want to risk allocation failing
	 * here, so re-allocate when the aggregation is over.
	 */
	sw_rx_data_prod->mapping = replace_buf->mapping;

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	rxq->sw_rx_prod++;

	/* Move the partial skb from cons to the pool (don't unmap yet) and
	 * save the mapping, in case we drop the packet later on.
	 */
	tpa_info->start_buf = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->start_buf_mapping = mapping;
	rxq->sw_rx_cons++;

	/* Set the tpa state to start only if we are able to allocate an skb
	 * for this aggregation; otherwise mark it as an error and the
	 * aggregation will be dropped.
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));

	/* Start filling in the aggregation info */
	tpa_info->frag_id = 0;
	tpa_info->agg_state = QEDE_AGG_STATE_START;

	rxhash = qede_get_rxhash(edev, cqe->bitfields,
				 cqe->rss_hash, &rxhash_type);
	skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
		    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU-sized packet from the GRO flow due to
	 * aggregation timeout/last segment etc., which is not expected to
	 * be a GRO packet. If the skb has zero frags, simply push it up
	 * the stack as a non-GSO skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rss_id);
	qede_skb_receive(edev, fp, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static void qede_tpa_end(struct qede_dev *edev,
			 struct qede_fastpath *fp,
			 struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
1245                        "Strange - TPA emd with more than a single len_list entry\n");

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	memcpy(skb->data,
	       page_address(tpa_info->start_buf.data) +
		tpa_info->start_cqe.placement_offset +
		tpa_info->start_buf.page_offset,
	       le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));

	/* Recycle [mapped] start buffer for the next replacement */
	tpa_info->replace_buf = tpa_info->start_buf;
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->agg_state = QEDE_AGG_STATE_NONE;

	return;
err:
	/* The BD starting the aggregation is still mapped; Re-use it for
	 * future aggregations [as replacement buffer]
	 */
	memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
	       sizeof(struct sw_rx_data));
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
	tpa_info->start_buf.data = NULL;
	tpa_info->agg_state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

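/* Checksum verdict for tunnelled packets: considers the tunnel and inner
 * L4 checksum results along with the IP header error flags, returning
 * QEDE_CSUM_ERROR on any failure.
 */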
static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}

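/* NAPI RX handler: drain up to @budget completion-queue entries, handing
 * slow-path CQEs back to qed, steering TPA CQEs to the aggregation
 * handlers and building skbs for regular packets.
 */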
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_dev *edev = fp->edev;
	struct qede_rx_queue *rxq = fp->rxq;

	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
	int rx_pkt = 0;
	u8 csum_flag;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * the CQE/BD in the while-loop before reading hw_comp_cons. Without
	 * it, a CQE could be read before FW writes it; FW then writes the CQE
	 * and SB, the CPU reads hw_comp_cons and ends up using a stale CQE.
	 */
1391         rmb();
1392
1393         /* Loop to complete all indicated BDs */
1394         while (sw_comp_cons != hw_comp_cons) {
1395                 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1396                 enum pkt_hash_types rxhash_type;
1397                 enum eth_rx_cqe_type cqe_type;
1398                 struct sw_rx_data *sw_rx_data;
1399                 union eth_rx_cqe *cqe;
1400                 struct sk_buff *skb;
1401                 struct page *data;
1402                 __le16 flags;
1403                 u16 len, pad;
1404                 u32 rx_hash;
1405
1406                 /* Get the CQE from the completion ring */
1407                 cqe = (union eth_rx_cqe *)
1408                         qed_chain_consume(&rxq->rx_comp_ring);
1409                 cqe_type = cqe->fast_path_regular.type;
1410
1411                 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1412                         edev->ops->eth_cqe_completion(
1413                                         edev->cdev, fp->rss_id,
1414                                         (struct eth_slow_path_rx_cqe *)cqe);
1415                         goto next_cqe;
1416                 }
1417
1418                 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
1419                         switch (cqe_type) {
1420                         case ETH_RX_CQE_TYPE_TPA_START:
1421                                 qede_tpa_start(edev, rxq,
1422                                                &cqe->fast_path_tpa_start);
1423                                 goto next_cqe;
1424                         case ETH_RX_CQE_TYPE_TPA_CONT:
1425                                 qede_tpa_cont(edev, rxq,
1426                                               &cqe->fast_path_tpa_cont);
1427                                 goto next_cqe;
1428                         case ETH_RX_CQE_TYPE_TPA_END:
1429                                 qede_tpa_end(edev, fp,
1430                                              &cqe->fast_path_tpa_end);
1431                                 goto next_rx_only;
1432                         default:
1433                                 break;
1434                         }
1435                 }
1436
1437                 /* Get the data from the SW ring */
1438                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1439                 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1440                 data = sw_rx_data->data;
1441
1442                 fp_cqe = &cqe->fast_path_regular;
1443                 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1444                 pad = fp_cqe->placement_offset;
1445                 flags = cqe->fast_path_regular.pars_flags.flags;
1446
1447                 /* If this is an error packet then drop it */
1448                 parse_flag = le16_to_cpu(flags);
1449
1450                 csum_flag = qede_check_csum(parse_flag);
1451                 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1452                         if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
1453                                                       parse_flag)) {
1454                                 rxq->rx_ip_frags++;
1455                                 goto alloc_skb;
1456                         }
1457
1458                         DP_NOTICE(edev,
1459                                   "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1460                                   sw_comp_cons, parse_flag);
1461                         rxq->rx_hw_errors++;
1462                         qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1463                         goto next_cqe;
1464                 }
1465
1466 alloc_skb:
1467                 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1468                 if (unlikely(!skb)) {
1469                         DP_NOTICE(edev,
1470                                   "SKB allocation failed, dropping incoming packet\n");
1471                         qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1472                         rxq->rx_alloc_errors++;
1473                         goto next_cqe;
1474                 }
1475
1476                 /* Copy data into SKB */
1477                 if (len + pad <= edev->rx_copybreak) {
1478                         memcpy(skb_put(skb, len),
1479                                page_address(data) + pad +
1480                                 sw_rx_data->page_offset, len);
1481                         qede_reuse_page(edev, rxq, sw_rx_data);
1482                 } else {
1483                         struct skb_frag_struct *frag;
1484                         unsigned int pull_len;
1485                         unsigned char *va;
1486
1487                         frag = &skb_shinfo(skb)->frags[0];
1488
1489                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
1490                                         pad + sw_rx_data->page_offset,
1491                                         len, rxq->rx_buf_seg_size);
1492
1493                         va = skb_frag_address(frag);
1494                         pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
1495
1496                         /* Align the pull_len to optimize memcpy */
1497                         memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
1498
1499                         skb_frag_size_sub(frag, pull_len);
1500                         frag->page_offset += pull_len;
1501                         skb->data_len -= pull_len;
1502                         skb->tail += pull_len;
1503
1504                         if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1505                                                             sw_rx_data))) {
1506                                 DP_ERR(edev, "Failed to allocate rx buffer\n");
1507                                 /* Increment the page refcount so it can be
1508                                  * reused after this allocation failure and
1509                                  * isn't freed along with the SKB.
1510                                  */
1511
1512                                 page_ref_inc(sw_rx_data->data);
1513                                 rxq->rx_alloc_errors++;
1514                                 qede_recycle_rx_bd_ring(rxq, edev,
1515                                                         fp_cqe->bd_num);
1516                                 dev_kfree_skb_any(skb);
1517                                 goto next_cqe;
1518                         }
1519                 }
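                     /* Example (illustrative): with rx_copybreak left at its
                      * default of QEDE_RX_HDR_SIZE, a short frame is memcpy'd
                      * into the SKB linear area and its page recycled, while a
                      * full-MTU frame is attached as a page fragment with only
                      * the header pulled into the linear area.
                      */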
1520
1521                 qede_rx_bd_ring_consume(rxq);
1522
1523                 if (fp_cqe->bd_num != 1) {
1524                         u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1525                         u8 num_frags;
1526
1527                         pkt_len -= len;
1528
1529                         for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
1530                              num_frags--) {
1531                                 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1532                                                 rxq->rx_buf_size : pkt_len;
1533                                 if (unlikely(!cur_size)) {
1534                                         DP_ERR(edev,
1535                                                "Still got %d BDs for mapping jumbo, but length became 0\n",
1536                                                num_frags);
1537                                         qede_recycle_rx_bd_ring(rxq, edev,
1538                                                                 num_frags);
1539                                         dev_kfree_skb_any(skb);
1540                                         goto next_cqe;
1541                                 }
1542
1543                                 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1544                                         qede_recycle_rx_bd_ring(rxq, edev,
1545                                                                 num_frags);
1546                                         dev_kfree_skb_any(skb);
1547                                         goto next_cqe;
1548                                 }
1549
1550                                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1551                                 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1552                                 qede_rx_bd_ring_consume(rxq);
1553
1554                                 dma_unmap_page(&edev->pdev->dev,
1555                                                sw_rx_data->mapping,
1556                                                PAGE_SIZE, DMA_FROM_DEVICE);
1557
1558                                 skb_fill_page_desc(skb,
1559                                                    skb_shinfo(skb)->nr_frags++,
1560                                                    sw_rx_data->data, 0,
1561                                                    cur_size);
1562
1563                                 skb->truesize += PAGE_SIZE;
1564                                 skb->data_len += cur_size;
1565                                 skb->len += cur_size;
1566                                 pkt_len -= cur_size;
1567                         }
1568
1569                         if (unlikely(pkt_len))
1570                                 DP_ERR(edev,
1571                                        "Mapped all BDs of jumbo, but still have %d bytes\n",
1572                                        pkt_len);
1573                 }
1574
1575                 skb->protocol = eth_type_trans(skb, edev->ndev);
1576
1577                 rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
1578                                           fp_cqe->rss_hash,
1579                                           &rxhash_type);
1580
1581                 skb_set_hash(skb, rx_hash, rxhash_type);
1582
1583                 qede_set_skb_csum(skb, csum_flag);
1584
1585                 skb_record_rx_queue(skb, fp->rss_id);
1586
1587                 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1588 next_rx_only:
1589                 rx_pkt++;
1590
1591 next_cqe: /* don't consume bd rx buffer */
1592                 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1593                 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1594                 /* TODO (TPA): revisit how the budget is handled with TPA -
1595                  * perhaps increase it on "end"
1596                  */
1597                 if (rx_pkt == budget)
1598                         break;
1599         } /* repeat while sw_comp_cons != hw_comp_cons... */
1600
1601         /* Update producers */
1602         qede_update_rx_prod(edev, rxq);
1603
1604         return rx_pkt;
1605 }
1606
1607 static int qede_poll(struct napi_struct *napi, int budget)
1608 {
1609         struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1610                                                 napi);
1611         struct qede_dev *edev = fp->edev;
1612         int rx_work_done = 0;
1613         u8 tc;
1614
1615         for (tc = 0; tc < edev->num_tc; tc++)
1616                 if (qede_txq_has_work(&fp->txqs[tc]))
1617                         qede_tx_int(edev, &fp->txqs[tc]);
1618
1619         rx_work_done = qede_has_rx_work(fp->rxq) ?
1620                         qede_rx_int(fp, budget) : 0;
1621         if (rx_work_done < budget) {
1622                 qed_sb_update_sb_idx(fp->sb_info);
1623                 /* *_has_*_work() reads the status block, so we must
1624                  * make sure the status block indices have actually
1625                  * been read (qed_sb_update_sb_idx) before that check
1626                  * (*_has_*_work), so that we don't write a "newer"
1627                  * value of the status block to HW. If a DMA arrived
1628                  * right after qede_has_rx_work and there were no
1629                  * rmb, the memory read in qed_sb_update_sb_idx could
1630                  * be postponed until just before *_ack_sb. In that
1631                  * case there would never be another interrupt until
1632                  * the next update of the status block, even though
1633                  * there is still unhandled work, and the device
1634                  * would appear stuck.
1635                  */
1636                 rmb();
1637
1638                 /* Fall out from the NAPI loop if needed */
1639                 if (!(qede_has_rx_work(fp->rxq) ||
1640                       qede_has_tx_work(fp))) {
1641                         napi_complete(napi);
1642
1643                         /* Update and reenable interrupts */
1644                         qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
1645                                    1 /*update*/);
1646                 } else {
1647                         rx_work_done = budget;
1648                 }
1649         }
1650
1651         return rx_work_done;
1652 }
1653
1654 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1655 {
1656         struct qede_fastpath *fp = fp_cookie;
1657
1658         qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1659
1660         napi_schedule_irqoff(&fp->napi);
1661         return IRQ_HANDLED;
1662 }
1663
1664 /* -------------------------------------------------------------------------
1665  * END OF FAST-PATH
1666  * -------------------------------------------------------------------------
1667  */
1668
1669 static int qede_open(struct net_device *ndev);
1670 static int qede_close(struct net_device *ndev);
1671 static int qede_set_mac_addr(struct net_device *ndev, void *p);
1672 static void qede_set_rx_mode(struct net_device *ndev);
1673 static void qede_config_rx_mode(struct net_device *ndev);
1674
1675 static int qede_set_ucast_rx_mac(struct qede_dev *edev,
1676                                  enum qed_filter_xcast_params_type opcode,
1677                                  unsigned char mac[ETH_ALEN])
1678 {
1679         struct qed_filter_params filter_cmd;
1680
1681         memset(&filter_cmd, 0, sizeof(filter_cmd));
1682         filter_cmd.type = QED_FILTER_TYPE_UCAST;
1683         filter_cmd.filter.ucast.type = opcode;
1684         filter_cmd.filter.ucast.mac_valid = 1;
1685         ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
1686
1687         return edev->ops->filter_config(edev->cdev, &filter_cmd);
1688 }
1689
1690 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
1691                                   enum qed_filter_xcast_params_type opcode,
1692                                   u16 vid)
1693 {
1694         struct qed_filter_params filter_cmd;
1695
1696         memset(&filter_cmd, 0, sizeof(filter_cmd));
1697         filter_cmd.type = QED_FILTER_TYPE_UCAST;
1698         filter_cmd.filter.ucast.type = opcode;
1699         filter_cmd.filter.ucast.vlan_valid = 1;
1700         filter_cmd.filter.ucast.vlan = vid;
1701
1702         return edev->ops->filter_config(edev->cdev, &filter_cmd);
1703 }
1704
1705 void qede_fill_by_demand_stats(struct qede_dev *edev)
1706 {
1707         struct qed_eth_stats stats;
1708
1709         edev->ops->get_vport_stats(edev->cdev, &stats);
1710         edev->stats.no_buff_discards = stats.no_buff_discards;
1711         edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
1712         edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
1713         edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
1714         edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
1715         edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
1716         edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
1717         edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
1718         edev->stats.mac_filter_discards = stats.mac_filter_discards;
1719
1720         edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
1721         edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
1722         edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
1723         edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
1724         edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
1725         edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
1726         edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
1727         edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
1728         edev->stats.coalesced_events = stats.tpa_coalesced_events;
1729         edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
1730         edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
1731         edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
1732
1733         edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
1734         edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
1735         edev->stats.rx_128_to_255_byte_packets =
1736                                 stats.rx_128_to_255_byte_packets;
1737         edev->stats.rx_256_to_511_byte_packets =
1738                                 stats.rx_256_to_511_byte_packets;
1739         edev->stats.rx_512_to_1023_byte_packets =
1740                                 stats.rx_512_to_1023_byte_packets;
1741         edev->stats.rx_1024_to_1518_byte_packets =
1742                                 stats.rx_1024_to_1518_byte_packets;
1743         edev->stats.rx_1519_to_1522_byte_packets =
1744                                 stats.rx_1519_to_1522_byte_packets;
1745         edev->stats.rx_1519_to_2047_byte_packets =
1746                                 stats.rx_1519_to_2047_byte_packets;
1747         edev->stats.rx_2048_to_4095_byte_packets =
1748                                 stats.rx_2048_to_4095_byte_packets;
1749         edev->stats.rx_4096_to_9216_byte_packets =
1750                                 stats.rx_4096_to_9216_byte_packets;
1751         edev->stats.rx_9217_to_16383_byte_packets =
1752                                 stats.rx_9217_to_16383_byte_packets;
1753         edev->stats.rx_crc_errors = stats.rx_crc_errors;
1754         edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
1755         edev->stats.rx_pause_frames = stats.rx_pause_frames;
1756         edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
1757         edev->stats.rx_align_errors = stats.rx_align_errors;
1758         edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
1759         edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
1760         edev->stats.rx_jabbers = stats.rx_jabbers;
1761         edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
1762         edev->stats.rx_fragments = stats.rx_fragments;
1763         edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
1764         edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
1765         edev->stats.tx_128_to_255_byte_packets =
1766                                 stats.tx_128_to_255_byte_packets;
1767         edev->stats.tx_256_to_511_byte_packets =
1768                                 stats.tx_256_to_511_byte_packets;
1769         edev->stats.tx_512_to_1023_byte_packets =
1770                                 stats.tx_512_to_1023_byte_packets;
1771         edev->stats.tx_1024_to_1518_byte_packets =
1772                                 stats.tx_1024_to_1518_byte_packets;
1773         edev->stats.tx_1519_to_2047_byte_packets =
1774                                 stats.tx_1519_to_2047_byte_packets;
1775         edev->stats.tx_2048_to_4095_byte_packets =
1776                                 stats.tx_2048_to_4095_byte_packets;
1777         edev->stats.tx_4096_to_9216_byte_packets =
1778                                 stats.tx_4096_to_9216_byte_packets;
1779         edev->stats.tx_9217_to_16383_byte_packets =
1780                                 stats.tx_9217_to_16383_byte_packets;
1781         edev->stats.tx_pause_frames = stats.tx_pause_frames;
1782         edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
1783         edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
1784         edev->stats.tx_total_collisions = stats.tx_total_collisions;
1785         edev->stats.brb_truncates = stats.brb_truncates;
1786         edev->stats.brb_discards = stats.brb_discards;
1787         edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
1788 }
1789
1790 static struct rtnl_link_stats64 *qede_get_stats64(
1791                             struct net_device *dev,
1792                             struct rtnl_link_stats64 *stats)
1793 {
1794         struct qede_dev *edev = netdev_priv(dev);
1795
1796         qede_fill_by_demand_stats(edev);
1797
1798         stats->rx_packets = edev->stats.rx_ucast_pkts +
1799                             edev->stats.rx_mcast_pkts +
1800                             edev->stats.rx_bcast_pkts;
1801         stats->tx_packets = edev->stats.tx_ucast_pkts +
1802                             edev->stats.tx_mcast_pkts +
1803                             edev->stats.tx_bcast_pkts;
1804
1805         stats->rx_bytes = edev->stats.rx_ucast_bytes +
1806                           edev->stats.rx_mcast_bytes +
1807                           edev->stats.rx_bcast_bytes;
1808
1809         stats->tx_bytes = edev->stats.tx_ucast_bytes +
1810                           edev->stats.tx_mcast_bytes +
1811                           edev->stats.tx_bcast_bytes;
1812
1813         stats->tx_errors = edev->stats.tx_err_drop_pkts;
1814         stats->multicast = edev->stats.rx_mcast_pkts +
1815                            edev->stats.rx_bcast_pkts;
1816
1817         stats->rx_fifo_errors = edev->stats.no_buff_discards;
1818
1819         stats->collisions = edev->stats.tx_total_collisions;
1820         stats->rx_crc_errors = edev->stats.rx_crc_errors;
1821         stats->rx_frame_errors = edev->stats.rx_align_errors;
1822
1823         return stats;
1824 }
1825
1826 #ifdef CONFIG_QED_SRIOV
1827 static int qede_get_vf_config(struct net_device *dev, int vfidx,
1828                               struct ifla_vf_info *ivi)
1829 {
1830         struct qede_dev *edev = netdev_priv(dev);
1831
1832         if (!edev->ops)
1833                 return -EINVAL;
1834
1835         return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
1836 }
1837
1838 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
1839                             int min_tx_rate, int max_tx_rate)
1840 {
1841         struct qede_dev *edev = netdev_priv(dev);
1842
1843         return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
1844                                         max_tx_rate);
1845 }
1846
1847 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
1848 {
1849         struct qede_dev *edev = netdev_priv(dev);
1850
1851         if (!edev->ops)
1852                 return -EINVAL;
1853
1854         return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
1855 }
1856
1857 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
1858                                   int link_state)
1859 {
1860         struct qede_dev *edev = netdev_priv(dev);
1861
1862         if (!edev->ops)
1863                 return -EINVAL;
1864
1865         return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
1866 }
1867 #endif
1868
1869 static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
1870 {
1871         struct qed_update_vport_params params;
1872         int rc;
1873
1874         /* Proceed only if action actually needs to be performed */
1875         if (edev->accept_any_vlan == action)
1876                 return;
1877
1878         memset(&params, 0, sizeof(params));
1879
1880         params.vport_id = 0;
1881         params.accept_any_vlan = action;
1882         params.update_accept_any_vlan_flg = 1;
1883
1884         rc = edev->ops->vport_update(edev->cdev, &params);
1885         if (rc) {
1886                 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
1887                        action ? "enable" : "disable");
1888         } else {
1889                 DP_INFO(edev, "%s accept-any-vlan\n",
1890                         action ? "enabled" : "disabled");
1891                 edev->accept_any_vlan = action;
1892         }
1893 }
1894
1895 static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1896 {
1897         struct qede_dev *edev = netdev_priv(dev);
1898         struct qede_vlan *vlan, *tmp;
1899         int rc;
1900
1901         DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
1902
1903         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1904         if (!vlan) {
1905                 DP_INFO(edev, "Failed to allocate struct for vlan\n");
1906                 return -ENOMEM;
1907         }
1908         INIT_LIST_HEAD(&vlan->list);
1909         vlan->vid = vid;
1910         vlan->configured = false;
1911
1912         /* Verify vlan isn't already configured */
1913         list_for_each_entry(tmp, &edev->vlan_list, list) {
1914                 if (tmp->vid == vlan->vid) {
1915                         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1916                                    "vlan already configured\n");
1917                         kfree(vlan);
1918                         return -EEXIST;
1919                 }
1920         }
1921
1922         /* If interface is down, cache this VLAN ID and return */
1923         if (edev->state != QEDE_STATE_OPEN) {
1924                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1925                            "Interface is down, VLAN %d will be configured when interface is up\n",
1926                            vid);
1927                 if (vid != 0)
1928                         edev->non_configured_vlans++;
1929                 list_add(&vlan->list, &edev->vlan_list);
1930
1931                 return 0;
1932         }
1933
1934         /* Check for the filter limit.
1935          * Note - vlan0 has a reserved filter and can be added without
1936          * worrying about quota
1937          */
1938         if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
1939             (vlan->vid == 0)) {
1940                 rc = qede_set_ucast_rx_vlan(edev,
1941                                             QED_FILTER_XCAST_TYPE_ADD,
1942                                             vlan->vid);
1943                 if (rc) {
1944                         DP_ERR(edev, "Failed to configure VLAN %d\n",
1945                                vlan->vid);
1946                         kfree(vlan);
1947                         return -EINVAL;
1948                 }
1949                 vlan->configured = true;
1950
1951                 /* vlan0 filter isn't consuming out of our quota */
1952                 if (vlan->vid != 0)
1953                         edev->configured_vlans++;
1954         } else {
1955                 /* Out of quota; Activate accept-any-VLAN mode */
1956                 if (!edev->non_configured_vlans)
1957                         qede_config_accept_any_vlan(edev, true);
1958
1959                 edev->non_configured_vlans++;
1960         }
1961
1962         list_add(&vlan->list, &edev->vlan_list);
1963
1964         return 0;
1965 }
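
     /* Example (illustrative): with dev_info.num_vlan_filters == 2, adding
      * vids 10 and 20 consumes the HW filter quota; adding vid 30 afterwards
      * cannot get a filter, so accept-any-VLAN mode is enabled and vid 30 is
      * tracked as non-configured until a filter frees up (see
      * qede_configure_vlan_filters()).
      */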
1966
1967 static void qede_del_vlan_from_list(struct qede_dev *edev,
1968                                     struct qede_vlan *vlan)
1969 {
1970         /* vlan0 filter isn't consuming out of our quota */
1971         if (vlan->vid != 0) {
1972                 if (vlan->configured)
1973                         edev->configured_vlans--;
1974                 else
1975                         edev->non_configured_vlans--;
1976         }
1977
1978         list_del(&vlan->list);
1979         kfree(vlan);
1980 }
1981
1982 static int qede_configure_vlan_filters(struct qede_dev *edev)
1983 {
1984         int rc = 0, real_rc = 0, accept_any_vlan = 0;
1985         struct qed_dev_eth_info *dev_info;
1986         struct qede_vlan *vlan = NULL;
1987
1988         if (list_empty(&edev->vlan_list))
1989                 return 0;
1990
1991         dev_info = &edev->dev_info;
1992
1993         /* Configure non-configured vlans */
1994         list_for_each_entry(vlan, &edev->vlan_list, list) {
1995                 if (vlan->configured)
1996                         continue;
1997
1998                 /* We have used all our credits, now enable accept_any_vlan */
1999                 if ((vlan->vid != 0) &&
2000                     (edev->configured_vlans == dev_info->num_vlan_filters)) {
2001                         accept_any_vlan = 1;
2002                         continue;
2003                 }
2004
2005                 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
2006
2007                 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
2008                                             vlan->vid);
2009                 if (rc) {
2010                         DP_ERR(edev, "Failed to configure VLAN %u\n",
2011                                vlan->vid);
2012                         real_rc = rc;
2013                         continue;
2014                 }
2015
2016                 vlan->configured = true;
2017                 /* vlan0 filter doesn't consume our VLAN filter's quota */
2018                 if (vlan->vid != 0) {
2019                         edev->non_configured_vlans--;
2020                         edev->configured_vlans++;
2021                 }
2022         }
2023
2024         /* enable accept_any_vlan mode if we have more VLANs than credits,
2025          * or remove accept_any_vlan mode if we've actually removed
2026          * a non-configured vlan, and all remaining vlans are truly configured.
2027          */
2028
2029         if (accept_any_vlan)
2030                 qede_config_accept_any_vlan(edev, true);
2031         else if (!edev->non_configured_vlans)
2032                 qede_config_accept_any_vlan(edev, false);
2033
2034         return real_rc;
2035 }
2036
2037 static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2038 {
2039         struct qede_dev *edev = netdev_priv(dev);
2040         struct qede_vlan *vlan = NULL;
2041         int rc;
2042
2043         DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
2044
2045         /* Find whether entry exists */
2046         list_for_each_entry(vlan, &edev->vlan_list, list)
2047                 if (vlan->vid == vid)
2048                         break;
2049
2050         if (&vlan->list == &edev->vlan_list) {
2051                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
2052                            "Vlan isn't configured\n");
2053                 return 0;
2054         }
2055
2056         if (edev->state != QEDE_STATE_OPEN) {
2057                 /* The interface is already down, so there is no vport from
2058                  * which to remove the vlan filter; just update the vlan list.
2059                  */
2060                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
2061                            "Interface is down, removing VLAN from list only\n");
2062                 qede_del_vlan_from_list(edev, vlan);
2063                 return 0;
2064         }
2065
2066         /* Remove vlan */
2067         rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
2068         if (rc) {
2069                 DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
2070                 return -EINVAL;
2071         }
2072
2073         qede_del_vlan_from_list(edev, vlan);
2074
2075         /* We have removed a VLAN - try to see if we can
2076          * configure non-configured VLAN from the list.
2077          */
2078         rc = qede_configure_vlan_filters(edev);
2079
2080         return rc;
2081 }
2082
2083 static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
2084 {
2085         struct qede_vlan *vlan = NULL;
2086
2087         if (list_empty(&edev->vlan_list))
2088                 return;
2089
2090         list_for_each_entry(vlan, &edev->vlan_list, list) {
2091                 if (!vlan->configured)
2092                         continue;
2093
2094                 vlan->configured = false;
2095
2096                 /* vlan0 filter isn't consuming out of our quota */
2097                 if (vlan->vid != 0) {
2098                         edev->non_configured_vlans++;
2099                         edev->configured_vlans--;
2100                 }
2101
2102                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
2103                            "marked vlan %d as non-configured\n",
2104                            vlan->vid);
2105         }
2106
2107         edev->accept_any_vlan = false;
2108 }
2109
2110 int qede_set_features(struct net_device *dev, netdev_features_t features)
2111 {
2112         struct qede_dev *edev = netdev_priv(dev);
2113         netdev_features_t changes = features ^ dev->features;
2114         bool need_reload = false;
2115
2116         /* No action needed if hardware GRO is disabled during driver load */
2117         if (changes & NETIF_F_GRO) {
2118                 if (dev->features & NETIF_F_GRO)
2119                         need_reload = !edev->gro_disable;
2120                 else
2121                         need_reload = edev->gro_disable;
2122         }
2123
2124         if (need_reload && netif_running(edev->ndev)) {
2125                 dev->features = features;
2126                 qede_reload(edev, NULL, NULL);
2127                 return 1;
2128         }
2129
2130         return 0;
2131 }
2132
2133 static void qede_udp_tunnel_add(struct net_device *dev,
2134                                 struct udp_tunnel_info *ti)
2135 {
2136         struct qede_dev *edev = netdev_priv(dev);
2137         u16 t_port = ntohs(ti->port);
2138
2139         switch (ti->type) {
2140         case UDP_TUNNEL_TYPE_VXLAN:
2141                 if (edev->vxlan_dst_port)
2142                         return;
2143
2144                 edev->vxlan_dst_port = t_port;
2145
2146                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
2147                            t_port);
2148
2149                 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2150                 break;
2151         case UDP_TUNNEL_TYPE_GENEVE:
2152                 if (edev->geneve_dst_port)
2153                         return;
2154
2155                 edev->geneve_dst_port = t_port;
2156
2157                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
2158                            t_port);
2159                 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2160                 break;
2161         default:
2162                 return;
2163         }
2164
2165         schedule_delayed_work(&edev->sp_task, 0);
2166 }
2167
2168 static void qede_udp_tunnel_del(struct net_device *dev,
2169                                 struct udp_tunnel_info *ti)
2170 {
2171         struct qede_dev *edev = netdev_priv(dev);
2172         u16 t_port = ntohs(ti->port);
2173
2174         switch (ti->type) {
2175         case UDP_TUNNEL_TYPE_VXLAN:
2176                 if (t_port != edev->vxlan_dst_port)
2177                         return;
2178
2179                 edev->vxlan_dst_port = 0;
2180
2181                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
2182                            t_port);
2183
2184                 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2185                 break;
2186         case UDP_TUNNEL_TYPE_GENEVE:
2187                 if (t_port != edev->geneve_dst_port)
2188                         return;
2189
2190                 edev->geneve_dst_port = 0;
2191
2192                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
2193                            t_port);
2194                 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2195                 break;
2196         default:
2197                 return;
2198         }
2199
2200         schedule_delayed_work(&edev->sp_task, 0);
2201 }
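
     /* Example flow (illustrative): when a VXLAN socket binds UDP port 4789,
      * the stack invokes .ndo_udp_tunnel_add; the port is latched in edev, the
      * matching QEDE_SP_*_PORT_CONFIG bit is set, and qede_sp_task() later
      * pushes the new port to the device via qed_ops->tunn_config().
      */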
2202
2203 static const struct net_device_ops qede_netdev_ops = {
2204         .ndo_open = qede_open,
2205         .ndo_stop = qede_close,
2206         .ndo_start_xmit = qede_start_xmit,
2207         .ndo_set_rx_mode = qede_set_rx_mode,
2208         .ndo_set_mac_address = qede_set_mac_addr,
2209         .ndo_validate_addr = eth_validate_addr,
2210         .ndo_change_mtu = qede_change_mtu,
2211 #ifdef CONFIG_QED_SRIOV
2212         .ndo_set_vf_mac = qede_set_vf_mac,
2213         .ndo_set_vf_vlan = qede_set_vf_vlan,
2214 #endif
2215         .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
2216         .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
2217         .ndo_set_features = qede_set_features,
2218         .ndo_get_stats64 = qede_get_stats64,
2219 #ifdef CONFIG_QED_SRIOV
2220         .ndo_set_vf_link_state = qede_set_vf_link_state,
2221         .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
2222         .ndo_get_vf_config = qede_get_vf_config,
2223         .ndo_set_vf_rate = qede_set_vf_rate,
2224 #endif
2225         .ndo_udp_tunnel_add = qede_udp_tunnel_add,
2226         .ndo_udp_tunnel_del = qede_udp_tunnel_del,
2227 };
2228
2229 /* -------------------------------------------------------------------------
2230  * START OF PROBE / REMOVE
2231  * -------------------------------------------------------------------------
2232  */
2233
2234 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
2235                                             struct pci_dev *pdev,
2236                                             struct qed_dev_eth_info *info,
2237                                             u32 dp_module,
2238                                             u8 dp_level)
2239 {
2240         struct net_device *ndev;
2241         struct qede_dev *edev;
2242
2243         ndev = alloc_etherdev_mqs(sizeof(*edev),
2244                                   info->num_queues,
2245                                   info->num_queues);
2246         if (!ndev) {
2247                 pr_err("etherdev allocation failed\n");
2248                 return NULL;
2249         }
2250
2251         edev = netdev_priv(ndev);
2252         edev->ndev = ndev;
2253         edev->cdev = cdev;
2254         edev->pdev = pdev;
2255         edev->dp_module = dp_module;
2256         edev->dp_level = dp_level;
2257         edev->ops = qed_ops;
2258         edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
2259         edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
2260
2261         SET_NETDEV_DEV(ndev, &pdev->dev);
2262
2263         memset(&edev->stats, 0, sizeof(edev->stats));
2264         memcpy(&edev->dev_info, info, sizeof(*info));
2265
2266         edev->num_tc = edev->dev_info.num_tc;
2267
2268         INIT_LIST_HEAD(&edev->vlan_list);
2269
2270         return edev;
2271 }
2272
2273 static void qede_init_ndev(struct qede_dev *edev)
2274 {
2275         struct net_device *ndev = edev->ndev;
2276         struct pci_dev *pdev = edev->pdev;
2277         u32 hw_features;
2278
2279         pci_set_drvdata(pdev, ndev);
2280
2281         ndev->mem_start = edev->dev_info.common.pci_mem_start;
2282         ndev->base_addr = ndev->mem_start;
2283         ndev->mem_end = edev->dev_info.common.pci_mem_end;
2284         ndev->irq = edev->dev_info.common.pci_irq;
2285
2286         ndev->watchdog_timeo = TX_TIMEOUT;
2287
2288         ndev->netdev_ops = &qede_netdev_ops;
2289
2290         qede_set_ethtool_ops(ndev);
2291
2292         /* User-changeable features */
2293         hw_features = NETIF_F_GRO | NETIF_F_SG |
2294                       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2295                       NETIF_F_TSO | NETIF_F_TSO6;
2296
2297         /* Encap features */
2298         hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
2299                        NETIF_F_TSO_ECN;
2300         ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2301                                 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
2302                                 NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2303                                 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
2304
2305         ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
2306                               NETIF_F_HIGHDMA;
2307         ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
2308                          NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
2309                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
2310
2311         ndev->hw_features = hw_features;
2312
2313         /* Set network device HW mac */
2314         ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
2315 }
2316
2317 /* This function converts a 32b param into two params: level and module.
2318  * Input 32b decoding:
2319  * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
2320  * 'happy' flow, e.g. a memory allocation failure.
2321  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
2322  * and provide important parameters.
2323  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints for that
2324  * module. VERBOSE prints are for tracking a specific flow at a low level.
2325  *
2326  * Note that the level should be that of the lowest required logs.
2327  */
2328 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
2329 {
2330         *p_dp_level = QED_LEVEL_NOTICE;
2331         *p_dp_module = 0;
2332
2333         if (debug & QED_LOG_VERBOSE_MASK) {
2334                 *p_dp_level = QED_LEVEL_VERBOSE;
2335                 *p_dp_module = (debug & 0x3FFFFFFF);
2336         } else if (debug & QED_LOG_INFO_MASK) {
2337                 *p_dp_level = QED_LEVEL_INFO;
2338         } else if (debug & QED_LOG_NOTICE_MASK) {
2339                 *p_dp_level = QED_LEVEL_NOTICE;
2340         }
2341 }
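
     /* Worked example (illustrative; assumes the conventional mask values
      * QED_LOG_NOTICE_MASK == BIT(31), QED_LOG_INFO_MASK == BIT(30) and
      * QED_LOG_VERBOSE_MASK == 0x3fffffff):
      *   debug = 0x00000005 -> QED_LEVEL_VERBOSE, dp_module = 0x5
      *                         (VERBOSE for the modules at bits 0 and 2)
      *   debug = 0x40000000 -> QED_LEVEL_INFO,    dp_module = 0
      *   debug = 0x80000000 -> QED_LEVEL_NOTICE,  dp_module = 0
      */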
2342
2343 static void qede_free_fp_array(struct qede_dev *edev)
2344 {
2345         if (edev->fp_array) {
2346                 struct qede_fastpath *fp;
2347                 int i;
2348
2349                 for_each_rss(i) {
2350                         fp = &edev->fp_array[i];
2351
2352                         kfree(fp->sb_info);
2353                         kfree(fp->rxq);
2354                         kfree(fp->txqs);
2355                 }
2356                 kfree(edev->fp_array);
2357         }
2358         edev->num_rss = 0;
2359 }
2360
2361 static int qede_alloc_fp_array(struct qede_dev *edev)
2362 {
2363         struct qede_fastpath *fp;
2364         int i;
2365
2366         edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
2367                                  sizeof(*edev->fp_array), GFP_KERNEL);
2368         if (!edev->fp_array) {
2369                 DP_NOTICE(edev, "fp array allocation failed\n");
2370                 goto err;
2371         }
2372
2373         for_each_rss(i) {
2374                 fp = &edev->fp_array[i];
2375
2376                 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2377                 if (!fp->sb_info) {
2378                         DP_NOTICE(edev, "sb info struct allocation failed\n");
2379                         goto err;
2380                 }
2381
2382                 fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
2383                 if (!fp->rxq) {
2384                         DP_NOTICE(edev, "RXQ struct allocation failed\n");
2385                         goto err;
2386                 }
2387
2388                 fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
2389                 if (!fp->txqs) {
2390                         DP_NOTICE(edev, "TXQ array allocation failed\n");
2391                         goto err;
2392                 }
2393         }
2394
2395         return 0;
2396 err:
2397         qede_free_fp_array(edev);
2398         return -ENOMEM;
2399 }
2400
2401 static void qede_sp_task(struct work_struct *work)
2402 {
2403         struct qede_dev *edev = container_of(work, struct qede_dev,
2404                                              sp_task.work);
2405         struct qed_dev *cdev = edev->cdev;
2406
2407         mutex_lock(&edev->qede_lock);
2408
2409         if (edev->state == QEDE_STATE_OPEN) {
2410                 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
2411                         qede_config_rx_mode(edev->ndev);
2412         }
2413
2414         if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
2415                 struct qed_tunn_params tunn_params;
2416
2417                 memset(&tunn_params, 0, sizeof(tunn_params));
2418                 tunn_params.update_vxlan_port = 1;
2419                 tunn_params.vxlan_port = edev->vxlan_dst_port;
2420                 qed_ops->tunn_config(cdev, &tunn_params);
2421         }
2422
2423         if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
2424                 struct qed_tunn_params tunn_params;
2425
2426                 memset(&tunn_params, 0, sizeof(tunn_params));
2427                 tunn_params.update_geneve_port = 1;
2428                 tunn_params.geneve_port = edev->geneve_dst_port;
2429                 qed_ops->tunn_config(cdev, &tunn_params);
2430         }
2431
2432         mutex_unlock(&edev->qede_lock);
2433 }
2434
2435 static void qede_update_pf_params(struct qed_dev *cdev)
2436 {
2437         struct qed_pf_params pf_params;
2438
2439         /* 64 rx + 64 tx */
2440         memset(&pf_params, 0, sizeof(struct qed_pf_params));
2441         pf_params.eth_pf_params.num_cons = 128;
2442         qed_ops->common->update_pf_params(cdev, &pf_params);
2443 }
2444
2445 enum qede_probe_mode {
2446         QEDE_PROBE_NORMAL,
2447 };
2448
2449 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2450                         bool is_vf, enum qede_probe_mode mode)
2451 {
2452         struct qed_probe_params probe_params;
2453         struct qed_slowpath_params params;
2454         struct qed_dev_eth_info dev_info;
2455         struct qede_dev *edev;
2456         struct qed_dev *cdev;
2457         int rc;
2458
2459         if (unlikely(dp_level & QED_LEVEL_INFO))
2460                 pr_notice("Starting qede probe\n");
2461
2462         memset(&probe_params, 0, sizeof(probe_params));
2463         probe_params.protocol = QED_PROTOCOL_ETH;
2464         probe_params.dp_module = dp_module;
2465         probe_params.dp_level = dp_level;
2466         probe_params.is_vf = is_vf;
2467         cdev = qed_ops->common->probe(pdev, &probe_params);
2468         if (!cdev) {
2469                 rc = -ENODEV;
2470                 goto err0;
2471         }
2472
2473         qede_update_pf_params(cdev);
2474
2475         /* Start the Slowpath-process */
2476         memset(&params, 0, sizeof(struct qed_slowpath_params));
2477         params.int_mode = QED_INT_MODE_MSIX;
2478         params.drv_major = QEDE_MAJOR_VERSION;
2479         params.drv_minor = QEDE_MINOR_VERSION;
2480         params.drv_rev = QEDE_REVISION_VERSION;
2481         params.drv_eng = QEDE_ENGINEERING_VERSION;
2482         strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
2483         rc = qed_ops->common->slowpath_start(cdev, &params);
2484         if (rc) {
2485                 pr_notice("Cannot start slowpath\n");
2486                 goto err1;
2487         }
2488
2489         /* Learn information crucial for qede to progress */
2490         rc = qed_ops->fill_dev_info(cdev, &dev_info);
2491         if (rc)
2492                 goto err2;
2493
2494         edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
2495                                    dp_level);
2496         if (!edev) {
2497                 rc = -ENOMEM;
2498                 goto err2;
2499         }
2500
2501         if (is_vf)
2502                 edev->flags |= QEDE_FLAG_IS_VF;
2503
2504         qede_init_ndev(edev);
2505
2506         rc = register_netdev(edev->ndev);
2507         if (rc) {
2508                 DP_NOTICE(edev, "Cannot register net-device\n");
2509                 goto err3;
2510         }
2511
2512         edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
2513
2514         edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2515
2516 #ifdef CONFIG_DCB
2517         qede_set_dcbnl_ops(edev->ndev);
2518 #endif
2519
2520         INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
2521         mutex_init(&edev->qede_lock);
2522         edev->rx_copybreak = QEDE_RX_HDR_SIZE;
2523
2524         DP_INFO(edev, "qede probe completed successfully\n");
2525
2526         return 0;
2527
2528 err3:
2529         free_netdev(edev->ndev);
2530 err2:
2531         qed_ops->common->slowpath_stop(cdev);
2532 err1:
2533         qed_ops->common->remove(cdev);
2534 err0:
2535         return rc;
2536 }
2537
2538 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2539 {
2540         bool is_vf = false;
2541         u32 dp_module = 0;
2542         u8 dp_level = 0;
2543
2544         switch ((enum qede_pci_private)id->driver_data) {
2545         case QEDE_PRIVATE_VF:
2546                 if (debug & QED_LOG_VERBOSE_MASK)
2547                         dev_err(&pdev->dev, "Probing a VF\n");
2548                 is_vf = true;
2549                 break;
2550         default:
2551                 if (debug & QED_LOG_VERBOSE_MASK)
2552                         dev_err(&pdev->dev, "Probing a PF\n");
2553         }
2554
2555         qede_config_debug(debug, &dp_module, &dp_level);
2556
2557         return __qede_probe(pdev, dp_module, dp_level, is_vf,
2558                             QEDE_PROBE_NORMAL);
2559 }
2560
2561 enum qede_remove_mode {
2562         QEDE_REMOVE_NORMAL,
2563 };
2564
2565 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
2566 {
2567         struct net_device *ndev = pci_get_drvdata(pdev);
2568         struct qede_dev *edev = netdev_priv(ndev);
2569         struct qed_dev *cdev = edev->cdev;
2570
2571         DP_INFO(edev, "Starting qede_remove\n");
2572
2573         cancel_delayed_work_sync(&edev->sp_task);
2574         unregister_netdev(ndev);
2575
2576         edev->ops->common->set_power_state(cdev, PCI_D0);
2577
2578         pci_set_drvdata(pdev, NULL);
2579
2580         free_netdev(ndev);
2581
2582         /* Use global ops since we've freed edev */
2583         qed_ops->common->slowpath_stop(cdev);
2584         qed_ops->common->remove(cdev);
2585
2586         pr_notice("qede_remove completed successfully\n");
2587 }
2588
2589 static void qede_remove(struct pci_dev *pdev)
2590 {
2591         __qede_remove(pdev, QEDE_REMOVE_NORMAL);
2592 }
2593
2594 /* -------------------------------------------------------------------------
2595  * START OF LOAD / UNLOAD
2596  * -------------------------------------------------------------------------
2597  */
2598
2599 static int qede_set_num_queues(struct qede_dev *edev)
2600 {
2601         int rc;
2602         u16 rss_num;
2603
2604         /* Set up queues according to available resources */
2605         if (edev->req_rss)
2606                 rss_num = edev->req_rss;
2607         else
2608                 rss_num = netif_get_num_default_rss_queues() *
2609                           edev->dev_info.common.num_hwfns;
2610
2611         rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
2612
2613         rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
2614         if (rc > 0) {
2615                 /* Managed to request interrupts for our queues */
2616                 edev->num_rss = rc;
2617                 DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
2618                         QEDE_RSS_CNT(edev), rss_num);
2619                 rc = 0;
2620         }
2621         return rc;
2622 }
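
     /* Example (illustrative): on an 8-CPU host
      * netif_get_num_default_rss_queues() typically yields 8, so a two-hwfn
      * device requests 16 queues; the result is capped by QEDE_MAX_RSS_CNT()
      * and by how many fastpath interrupts set_fp_int() actually obtained.
      */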
2623
2624 static void qede_free_mem_sb(struct qede_dev *edev,
2625                              struct qed_sb_info *sb_info)
2626 {
2627         if (sb_info->sb_virt)
2628                 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
2629                                   (void *)sb_info->sb_virt, sb_info->sb_phys);
2630 }
2631
2632 /* This function allocates fast-path status block memory */
2633 static int qede_alloc_mem_sb(struct qede_dev *edev,
2634                              struct qed_sb_info *sb_info,
2635                              u16 sb_id)
2636 {
2637         struct status_block *sb_virt;
2638         dma_addr_t sb_phys;
2639         int rc;
2640
2641         sb_virt = dma_alloc_coherent(&edev->pdev->dev,
2642                                      sizeof(*sb_virt),
2643                                      &sb_phys, GFP_KERNEL);
2644         if (!sb_virt) {
2645                 DP_ERR(edev, "Status block allocation failed\n");
2646                 return -ENOMEM;
2647         }
2648
2649         rc = edev->ops->common->sb_init(edev->cdev, sb_info,
2650                                         sb_virt, sb_phys, sb_id,
2651                                         QED_SB_TYPE_L2_QUEUE);
2652         if (rc) {
2653                 DP_ERR(edev, "Status block initialization failed\n");
2654                 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
2655                                   sb_virt, sb_phys);
2656                 return rc;
2657         }
2658
2659         return 0;
2660 }
2661
2662 static void qede_free_rx_buffers(struct qede_dev *edev,
2663                                  struct qede_rx_queue *rxq)
2664 {
2665         u16 i;
2666
2667         for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
2668                 struct sw_rx_data *rx_buf;
2669                 struct page *data;
2670
2671                 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
2672                 data = rx_buf->data;
2673
2674                 dma_unmap_page(&edev->pdev->dev,
2675                                rx_buf->mapping,
2676                                PAGE_SIZE, DMA_FROM_DEVICE);
2677
2678                 rx_buf->data = NULL;
2679                 __free_page(data);
2680         }
2681 }
2682
2683 static void qede_free_sge_mem(struct qede_dev *edev,
2684                               struct qede_rx_queue *rxq)
     {
2685         int i;
2686
2687         if (edev->gro_disable)
2688                 return;
2689
2690         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
2691                 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2692                 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2693
2694                 if (replace_buf->data) {
2695                         dma_unmap_page(&edev->pdev->dev,
2696                                        replace_buf->mapping,
2697                                        PAGE_SIZE, DMA_FROM_DEVICE);
2698                         __free_page(replace_buf->data);
2699                 }
2700         }
2701 }
2702
2703 static void qede_free_mem_rxq(struct qede_dev *edev,
2704                               struct qede_rx_queue *rxq)
2705 {
2706         qede_free_sge_mem(edev, rxq);
2707
2708         /* Free rx buffers */
2709         qede_free_rx_buffers(edev, rxq);
2710
2711         /* Free the parallel SW ring */
2712         kfree(rxq->sw_rx_ring);
2713
2714         /* Free the real RQ ring used by FW */
2715         edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
2716         edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
2717 }
2718
2719 static int qede_alloc_rx_buffer(struct qede_dev *edev,
2720                                 struct qede_rx_queue *rxq)
2721 {
2722         struct sw_rx_data *sw_rx_data;
2723         struct eth_rx_bd *rx_bd;
2724         dma_addr_t mapping;
2725         struct page *data;
2726         u16 rx_buf_size;
2727
2728         rx_buf_size = rxq->rx_buf_size;
2729
2730         data = alloc_pages(GFP_ATOMIC, 0);
2731         if (unlikely(!data)) {
2732                 DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
2733                 return -ENOMEM;
2734         }
2735
2736         /* Map the entire page, since it may be split into
2737          * multiple RX-buffer-sized segments.
2738          */
2739         mapping = dma_map_page(&edev->pdev->dev, data, 0,
2740                                PAGE_SIZE, DMA_FROM_DEVICE);
2741         if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2742                 __free_page(data);
2743                 DP_NOTICE(edev, "Failed to map Rx buffer\n");
2744                 return -ENOMEM;
2745         }
2746
2747         sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
2748         sw_rx_data->page_offset = 0;
2749         sw_rx_data->data = data;
2750         sw_rx_data->mapping = mapping;
2751
2752         /* Advance PROD and get BD pointer */
2753         rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
2754         WARN_ON(!rx_bd);
2755         rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
2756         rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
2757
2758         rxq->sw_rx_prod++;
2759
2760         return 0;
2761 }
2762
2763 static int qede_alloc_sge_mem(struct qede_dev *edev,
2764                               struct qede_rx_queue *rxq)
2765 {
2766         dma_addr_t mapping;
2767         int i;
2768
2769         if (edev->gro_disable)
2770                 return 0;
2771
2772         if (edev->ndev->mtu > PAGE_SIZE) {
2773                 edev->gro_disable = 1;
2774                 return 0;
2775         }
2776
2777         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
2778                 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2779                 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2780
2781                 replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
2782                 if (unlikely(!replace_buf->data)) {
2783                         DP_NOTICE(edev,
2784                                   "Failed to allocate TPA skb pool [replacement buffer]\n");
2785                         goto err;
2786                 }
2787
                     /* Map the full page: the free path unmaps PAGE_SIZE, and
                      * the DMA API requires the unmap size to match the mapped
                      * size.
                      */
2788                 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
2789                                        PAGE_SIZE, DMA_FROM_DEVICE);
2790                 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2791                         DP_NOTICE(edev,
2792                                   "Failed to map TPA replacement buffer\n");
                             /* Free the unmapped page and clear the pointer so
                              * the error path won't unmap an invalid handle.
                              */
                             __free_page(replace_buf->data);
                             replace_buf->data = NULL;
2793                         goto err;
2794                 }
2795
2796                 replace_buf->mapping = mapping;
2797                 tpa_info->replace_buf.page_offset = 0;
2798
2799                 tpa_info->replace_buf_mapping = mapping;
2800                 tpa_info->agg_state = QEDE_AGG_STATE_NONE;
2801         }
2802
2803         return 0;
2804 err:
2805         qede_free_sge_mem(edev, rxq);
2806         edev->gro_disable = 1;
2807         return -ENOMEM;
2808 }
2809
2810 /* This function allocates all memory needed per Rx queue */
2811 static int qede_alloc_mem_rxq(struct qede_dev *edev,
2812                               struct qede_rx_queue *rxq)
2813 {
2814         int i, rc, size;
2815
2816         rxq->num_rx_buffers = edev->q_num_rx_buffers;
2817
2818         rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
2819                            edev->ndev->mtu;
2820         if (rxq->rx_buf_size > PAGE_SIZE)
2821                 rxq->rx_buf_size = PAGE_SIZE;
2822
2823         /* Segment size to split a page into multiple equal parts */
2824         rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
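             /* e.g. with a 1500-byte MTU the buffer size lands well under 2K,
              * so the segment size rounds up to 2048 and a 4K page carries
              * two Rx buffer segments.
              */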
2825
2826         /* Allocate the parallel driver ring for Rx buffers */
2827         size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
2828         rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2829         if (!rxq->sw_rx_ring) {
2830                 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2831                 rc = -ENOMEM;
2832                 goto err;
2833         }
2834
2835         /* Allocate FW Rx ring */
2836         rc = edev->ops->common->chain_alloc(edev->cdev,
2837                                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2838                                             QED_CHAIN_MODE_NEXT_PTR,
2839                                             QED_CHAIN_CNT_TYPE_U16,
2840                                             RX_RING_SIZE,
2841                                             sizeof(struct eth_rx_bd),
2842                                             &rxq->rx_bd_ring);
2843
2844         if (rc)
2845                 goto err;
2846
2847         /* Allocate FW completion ring */
2848         rc = edev->ops->common->chain_alloc(edev->cdev,
2849                                             QED_CHAIN_USE_TO_CONSUME,
2850                                             QED_CHAIN_MODE_PBL,
2851                                             QED_CHAIN_CNT_TYPE_U16,
2852                                             RX_RING_SIZE,
2853                                             sizeof(union eth_rx_cqe),
2854                                             &rxq->rx_comp_ring);
2855         if (rc)
2856                 goto err;
2857
2858         /* Allocate buffers for the Rx ring */
2859         for (i = 0; i < rxq->num_rx_buffers; i++) {
2860                 rc = qede_alloc_rx_buffer(edev, rxq);
2861                 if (rc) {
2862                         DP_ERR(edev,
2863                                "Rx buffers allocation failed at index %d\n", i);
2864                         goto err;
2865                 }
2866         }
2867
2868         rc = qede_alloc_sge_mem(edev, rxq);
2869 err:
2870         return rc;
2871 }
2872
2873 static void qede_free_mem_txq(struct qede_dev *edev,
2874                               struct qede_tx_queue *txq)
2875 {
2876         /* Free the parallel SW ring */
2877         kfree(txq->sw_tx_ring);
2878
2879         /* Free the real Tx ring used by FW */
2880         edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
2881 }
2882
2883 /* This function allocates all memory needed per Tx queue */
2884 static int qede_alloc_mem_txq(struct qede_dev *edev,
2885                               struct qede_tx_queue *txq)
2886 {
2887         int size, rc;
2888         union eth_tx_bd_types *p_virt;
2889
2890         txq->num_tx_buffers = edev->q_num_tx_buffers;
2891
2892         /* Allocate the parallel driver ring for Tx buffers */
2893         size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
2894         txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
2895         if (!txq->sw_tx_ring) {
2896                 DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
2897                 goto err;
2898         }
2899
2900         rc = edev->ops->common->chain_alloc(edev->cdev,
2901                                             QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2902                                             QED_CHAIN_MODE_PBL,
2903                                             QED_CHAIN_CNT_TYPE_U16,
2904                                             NUM_TX_BDS_MAX,
2905                                             sizeof(*p_virt), &txq->tx_pbl);
2906         if (rc)
2907                 goto err;
2908
2909         return 0;
2910
2911 err:
2912         qede_free_mem_txq(edev, txq);
2913         return -ENOMEM;
2914 }
2915
2916 /* This function frees all memory of a single fp */
2917 static void qede_free_mem_fp(struct qede_dev *edev,
2918                              struct qede_fastpath *fp)
2919 {
2920         int tc;
2921
2922         qede_free_mem_sb(edev, fp->sb_info);
2923
2924         qede_free_mem_rxq(edev, fp->rxq);
2925
2926         for (tc = 0; tc < edev->num_tc; tc++)
2927                 qede_free_mem_txq(edev, &fp->txqs[tc]);
2928 }
2929
2930 /* This function allocates all memory needed for a single fp (i.e. an entity
2931  * which contains a status block, one Rx queue and multiple per-TC Tx queues).
2932  */
2933 static int qede_alloc_mem_fp(struct qede_dev *edev,
2934                              struct qede_fastpath *fp)
2935 {
2936         int rc, tc;
2937
2938         rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
2939         if (rc)
2940                 goto err;
2941
2942         rc = qede_alloc_mem_rxq(edev, fp->rxq);
2943         if (rc)
2944                 goto err;
2945
2946         for (tc = 0; tc < edev->num_tc; tc++) {
2947                 rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
2948                 if (rc)
2949                         goto err;
2950         }
2951
2952         return 0;
2953 err:
2954         return rc;
2955 }
2956
2957 static void qede_free_mem_load(struct qede_dev *edev)
2958 {
2959         int i;
2960
2961         for_each_rss(i) {
2962                 struct qede_fastpath *fp = &edev->fp_array[i];
2963
2964                 qede_free_mem_fp(edev, fp);
2965         }
2966 }
2967
2968 /* This function allocates all qede memory at NIC load. */
2969 static int qede_alloc_mem_load(struct qede_dev *edev)
2970 {
2971         int rc = 0, rss_id;
2972
2973         for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
2974                 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2975
2976                 rc = qede_alloc_mem_fp(edev, fp);
2977                 if (rc) {
2978                         DP_ERR(edev,
2979                                "Failed to allocate memory for fastpath - rss id = %d\n",
2980                                rss_id);
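                             /* Unwind everything allocated for the earlier
                              * fastpaths before failing the load.
                              */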
2981                         qede_free_mem_load(edev);
2982                         return rc;
2983                 }
2984         }
2985
2986         return 0;
2987 }
2988
2989 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
2990 static void qede_init_fp(struct qede_dev *edev)
2991 {
2992         int rss_id, txq_index, tc;
2993         struct qede_fastpath *fp;
2994
2995         for_each_rss(rss_id) {
2996                 fp = &edev->fp_array[rss_id];
2997
2998                 fp->edev = edev;
2999                 fp->rss_id = rss_id;
3000
3001                 memset((void *)&fp->napi, 0, sizeof(fp->napi));
3002
3003                 memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
3004
3005                 memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
3006                 fp->rxq->rxq_id = rss_id;
3007
3008                 memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
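                     /* The flat Tx queue index is TC-major: all TC0 queues
                      * come first, then all TC1 queues, and so on.
                      */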
3009                 for (tc = 0; tc < edev->num_tc; tc++) {
3010                         txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
3011                         fp->txqs[tc].index = txq_index;
3012                 }
3013
3014                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
3015                          edev->ndev->name, rss_id);
3016         }
3017
3018         edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
3019 }
3020
3021 static int qede_set_real_num_queues(struct qede_dev *edev)
3022 {
3023         int rc = 0;
3024
3025         rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
3026         if (rc) {
3027                 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
3028                 return rc;
3029         }
3030         rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
3031         if (rc) {
3032                 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
3033                 return rc;
3034         }
3035
3036         return 0;
3037 }
3038
3039 static void qede_napi_disable_remove(struct qede_dev *edev)
3040 {
3041         int i;
3042
3043         for_each_rss(i) {
3044                 napi_disable(&edev->fp_array[i].napi);
3045
3046                 netif_napi_del(&edev->fp_array[i].napi);
3047         }
3048 }
3049
3050 static void qede_napi_add_enable(struct qede_dev *edev)
3051 {
3052         int i;
3053
3054         /* Add NAPI objects */
3055         for_each_rss(i) {
3056                 netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
3057                                qede_poll, NAPI_POLL_WEIGHT);
3058                 napi_enable(&edev->fp_array[i].napi);
3059         }
3060 }
3061
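     /* Quiesce and release the fastpath interrupts; for MSI-X, make sure any
      * in-flight handler has finished before each vector is freed.
      */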
3062 static void qede_sync_free_irqs(struct qede_dev *edev)
3063 {
3064         int i;
3065
3066         for (i = 0; i < edev->int_info.used_cnt; i++) {
3067                 if (edev->int_info.msix_cnt) {
3068                         synchronize_irq(edev->int_info.msix[i].vector);
3069                         free_irq(edev->int_info.msix[i].vector,
3070                                  &edev->fp_array[i]);
3071                 } else {
3072                         edev->ops->common->simd_handler_clean(edev->cdev, i);
3073                 }
3074         }
3075
3076         edev->int_info.used_cnt = 0;
3077 }
3078
3079 static int qede_req_msix_irqs(struct qede_dev *edev)
3080 {
3081         int i, rc;
3082
3083         /* Sanity-check that the MSI-X vectors cover all prepared RSS queues */
3084         if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
3085                 DP_ERR(edev,
3086                        "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
3087                        QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
3088                 return -EINVAL;
3089         }
3090
3091         for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
3092                 rc = request_irq(edev->int_info.msix[i].vector,
3093                                  qede_msix_fp_int, 0, edev->fp_array[i].name,
3094                                  &edev->fp_array[i]);
3095                 if (rc) {
3096                         DP_ERR(edev, "Request fp %d irq failed\n", i);
3097                         qede_sync_free_irqs(edev);
3098                         return rc;
3099                 }
3100                 DP_VERBOSE(edev, NETIF_MSG_INTR,
3101                            "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
3102                            edev->fp_array[i].name, i,
3103                            &edev->fp_array[i]);
3104                 edev->int_info.used_cnt++;
3105         }
3106
3107         return 0;
3108 }
3109
3110 static void qede_simd_fp_handler(void *cookie)
3111 {
3112         struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
3113
3114         napi_schedule_irqoff(&fp->napi);
3115 }
3116
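     /* Learn the interrupt configuration from qed and attach handlers: one
      * MSI-X vector per RSS queue when available, otherwise register per-queue
      * callbacks ("simd" handlers) that qed invokes from its own interrupt.
      */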
3117 static int qede_setup_irqs(struct qede_dev *edev)
3118 {
3119         int i, rc = 0;
3120
3121         /* Learn Interrupt configuration */
3122         rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
3123         if (rc)
3124                 return rc;
3125
3126         if (edev->int_info.msix_cnt) {
3127                 rc = qede_req_msix_irqs(edev);
3128                 if (rc)
3129                         return rc;
3130                 edev->ndev->irq = edev->int_info.msix[0].vector;
3131         } else {
3132                 const struct qed_common_ops *ops;
3133
3134                 /* qed needs to learn the RSS ids and their callbacks */
3135                 ops = edev->ops->common;
3136                 for (i = 0; i < QEDE_RSS_CNT(edev); i++)
3137                         ops->simd_handler_config(edev->cdev,
3138                                                  &edev->fp_array[i], i,
3139                                                  qede_simd_fp_handler);
3140                 edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
3141         }
3142         return 0;
3143 }
3144
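     /* Poll for up to ~1-2 seconds until the SW consumer catches up with the
      * producer. If the queue is stuck, optionally ask the MCP to drain it
      * once and retry; a second timeout fails the flow.
      */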
3145 static int qede_drain_txq(struct qede_dev *edev,
3146                           struct qede_tx_queue *txq,
3147                           bool allow_drain)
3148 {
3149         int rc, cnt = 1000;
3150
3151         while (txq->sw_tx_cons != txq->sw_tx_prod) {
3152                 if (!cnt) {
3153                         if (allow_drain) {
3154                                 DP_NOTICE(edev,
3155                                           "Tx queue[%d] is stuck, requesting MCP to drain\n",
3156                                           txq->index);
3157                                 rc = edev->ops->common->drain(edev->cdev);
3158                                 if (rc)
3159                                         return rc;
3160                                 return qede_drain_txq(edev, txq, false);
3161                         }
3162                         DP_NOTICE(edev,
3163                                   "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
3164                                   txq->index, txq->sw_tx_prod,
3165                                   txq->sw_tx_cons);
3166                         return -ENODEV;
3167                 }
3168                 cnt--;
3169                 usleep_range(1000, 2000);
3170                 barrier();
3171         }
3172
3173         /* FW finished processing, wait for HW to transmit all tx packets */
3174         usleep_range(1000, 2000);
3175
3176         return 0;
3177 }
3178
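     /* Teardown order matters here: deactivate the vport so no new traffic is
      * accepted, drain the Tx queues, stop all queues in reverse creation
      * order, and only then stop the vport itself.
      */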
3179 static int qede_stop_queues(struct qede_dev *edev)
3180 {
3181         struct qed_update_vport_params vport_update_params;
3182         struct qed_dev *cdev = edev->cdev;
3183         int rc, tc, i;
3184
3185         /* Disable the vport */
3186         memset(&vport_update_params, 0, sizeof(vport_update_params));
3187         vport_update_params.vport_id = 0;
3188         vport_update_params.update_vport_active_flg = 1;
3189         vport_update_params.vport_active_flg = 0;
3190         vport_update_params.update_rss_flg = 0;
3191
3192         rc = edev->ops->vport_update(cdev, &vport_update_params);
3193         if (rc) {
3194                 DP_ERR(edev, "Failed to update vport\n");
3195                 return rc;
3196         }
3197
3198         /* Flush Tx queues. If needed, request drain from MCP */
3199         for_each_rss(i) {
3200                 struct qede_fastpath *fp = &edev->fp_array[i];
3201
3202                 for (tc = 0; tc < edev->num_tc; tc++) {
3203                         struct qede_tx_queue *txq = &fp->txqs[tc];
3204
3205                         rc = qede_drain_txq(edev, txq, true);
3206                         if (rc)
3207                                 return rc;
3208                 }
3209         }
3210
3211         /* Stop all queues in reverse order */
3212         for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
3213                 struct qed_stop_rxq_params rx_params;
3214
3215                 /* Stop the Tx queue(s) */
3216                 for (tc = 0; tc < edev->num_tc; tc++) {
3217                         struct qed_stop_txq_params tx_params;
3218
3219                         tx_params.rss_id = i;
3220                         tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
3221                         rc = edev->ops->q_tx_stop(cdev, &tx_params);
3222                         if (rc) {
3223                                 DP_ERR(edev, "Failed to stop TXQ #%d\n",
3224                                        tx_params.tx_queue_id);
3225                                 return rc;
3226                         }
3227                 }
3228
3229                 /* Stop the Rx queue */
3230                 memset(&rx_params, 0, sizeof(rx_params));
3231                 rx_params.rss_id = i;
3232                 rx_params.rx_queue_id = i;
3233
3234                 rc = edev->ops->q_rx_stop(cdev, &rx_params);
3235                 if (rc) {
3236                         DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
3237                         return rc;
3238                 }
3239         }
3240
3241         /* Stop the vport */
3242         rc = edev->ops->vport_stop(cdev, 0);
3243         if (rc)
3244                 DP_ERR(edev, "Failed to stop VPORT\n");
3245
3246         return rc;
3247 }
3248
3249 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
3250 {
3251         int rc, tc, i;
3252         int vlan_removal_en = 1;
3253         struct qed_dev *cdev = edev->cdev;
3254         struct qed_update_vport_params vport_update_params;
3255         struct qed_queue_start_common_params q_params;
3256         struct qed_dev_info *qed_info = &edev->dev_info.common;
3257         struct qed_start_vport_params start = {0};
3258         bool reset_rss_indir = false;
3259
3260         if (!edev->num_rss) {
3261                 DP_ERR(edev,
3262                        "Cannot update V-PORT to active as there are no Rx queues\n");
3263                 return -EINVAL;
3264         }
3265
3266         start.gro_enable = !edev->gro_disable;
3267         start.mtu = edev->ndev->mtu;
3268         start.vport_id = 0;
3269         start.drop_ttl0 = true;
3270         start.remove_inner_vlan = vlan_removal_en;
3271
3272         rc = edev->ops->vport_start(cdev, &start);
3273
3274         if (rc) {
3275                 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
3276                 return rc;
3277         }
3278
3279         DP_VERBOSE(edev, NETIF_MSG_IFUP,
3280                    "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
3281                    start.vport_id, edev->ndev->mtu + ETH_HLEN, vlan_removal_en);
3282
3283         for_each_rss(i) {
3284                 struct qede_fastpath *fp = &edev->fp_array[i];
3285                 dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
3286
3287                 memset(&q_params, 0, sizeof(q_params));
3288                 q_params.rss_id = i;
3289                 q_params.queue_id = i;
3290                 q_params.vport_id = 0;
3291                 q_params.sb = fp->sb_info->igu_sb_id;
3292                 q_params.sb_idx = RX_PI;
3293
3294                 rc = edev->ops->q_rx_start(cdev, &q_params,
3295                                            fp->rxq->rx_buf_size,
3296                                            fp->rxq->rx_bd_ring.p_phys_addr,
3297                                            phys_table,
3298                                            fp->rxq->rx_comp_ring.page_cnt,
3299                                            &fp->rxq->hw_rxq_prod_addr);
3300                 if (rc) {
3301                         DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
3302                         return rc;
3303                 }
3304
3305                 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
3306
3307                 qede_update_rx_prod(edev, fp->rxq);
3308
3309                 for (tc = 0; tc < edev->num_tc; tc++) {
3310                         struct qede_tx_queue *txq = &fp->txqs[tc];
3311                         int txq_index = tc * QEDE_RSS_CNT(edev) + i;
3312
3313                         memset(&q_params, 0, sizeof(q_params));
3314                         q_params.rss_id = i;
3315                         q_params.queue_id = txq_index;
3316                         q_params.vport_id = 0;
3317                         q_params.sb = fp->sb_info->igu_sb_id;
3318                         q_params.sb_idx = TX_PI(tc);
3319
3320                         rc = edev->ops->q_tx_start(cdev, &q_params,
3321                                                    txq->tx_pbl.pbl.p_phys_table,
3322                                                    txq->tx_pbl.page_cnt,
3323                                                    &txq->doorbell_addr);
3324                         if (rc) {
3325                                 DP_ERR(edev, "Start TXQ #%d failed %d\n",
3326                                        txq_index, rc);
3327                                 return rc;
3328                         }
3329
3330                         txq->hw_cons_ptr =
3331                                 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
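                             /* Prepare the static doorbell data once: route
                              * the doorbell to the XCM block and have it SET
                              * the Tx BD producer value selected by
                              * AGG_VAL_SEL.
                              */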
3332                         SET_FIELD(txq->tx_db.data.params,
3333                                   ETH_DB_DATA_DEST, DB_DEST_XCM);
3334                         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
3335                                   DB_AGG_CMD_SET);
3336                         SET_FIELD(txq->tx_db.data.params,
3337                                   ETH_DB_DATA_AGG_VAL_SEL,
3338                                   DQ_XCM_ETH_TX_BD_PROD_CMD);
3339
3340                         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
3341                 }
3342         }
3343
3344         /* Prepare and send the vport enable */
3345         memset(&vport_update_params, 0, sizeof(vport_update_params));
3346         vport_update_params.vport_id = start.vport_id;
3347         vport_update_params.update_vport_active_flg = 1;
3348         vport_update_params.vport_active_flg = 1;
3349
3350         if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
3351             qed_info->tx_switching) {
3352                 vport_update_params.update_tx_switching_flg = 1;
3353                 vport_update_params.tx_switching_flg = 1;
3354         }
3355
3356         /* Fill struct with RSS params */
3357         if (QEDE_RSS_CNT(edev) > 1) {
3358                 vport_update_params.update_rss_flg = 1;
3359
3360                 /* Validate that the RSS indirection table references only valid queues */
3361                 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
3362                         if (edev->rss_params.rss_ind_table[i] >=
3363                             edev->num_rss) {
3364                                 reset_rss_indir = true;
3365                                 break;
3366                         }
3367                 }
3368
3369                 if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
3370                     reset_rss_indir) {
3371                         u16 val;
3372
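                             /* ethtool_rxfh_indir_default(i, n) is simply
                              * i % n, spreading the table evenly over the
                              * active queues.
                              */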
3373                         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
3374                                 u16 indir_val;
3375
3376                                 val = QEDE_RSS_CNT(edev);
3377                                 indir_val = ethtool_rxfh_indir_default(i, val);
3378                                 edev->rss_params.rss_ind_table[i] = indir_val;
3379                         }
3380                         edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
3381                 }
3382
3383                 if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
3384                         netdev_rss_key_fill(edev->rss_params.rss_key,
3385                                             sizeof(edev->rss_params.rss_key));
3386                         edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
3387                 }
3388
3389                 if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
3390                         edev->rss_params.rss_caps = QED_RSS_IPV4 |
3391                                                     QED_RSS_IPV6 |
3392                                                     QED_RSS_IPV4_TCP |
3393                                                     QED_RSS_IPV6_TCP;
3394                         edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
3395                 }
3396
3397                 memcpy(&vport_update_params.rss_params, &edev->rss_params,
3398                        sizeof(vport_update_params.rss_params));
3399         } else {
3400                 memset(&vport_update_params.rss_params, 0,
3401                        sizeof(vport_update_params.rss_params));
3402         }
3403
3404         rc = edev->ops->vport_update(cdev, &vport_update_params);
3405         if (rc) {
3406                 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
3407                 return rc;
3408         }
3409
3410         return 0;
3411 }
3412
3413 static int qede_set_mcast_rx_mac(struct qede_dev *edev,
3414                                  enum qed_filter_xcast_params_type opcode,
3415                                  unsigned char *mac, int num_macs)
3416 {
3417         struct qed_filter_params filter_cmd;
3418         int i;
3419
3420         memset(&filter_cmd, 0, sizeof(filter_cmd));
3421         filter_cmd.type = QED_FILTER_TYPE_MCAST;
3422         filter_cmd.filter.mcast.type = opcode;
3423         filter_cmd.filter.mcast.num = num_macs;
3424
3425         for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
3426                 ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
3427
3428         return edev->ops->filter_config(edev->cdev, &filter_cmd);
3429 }
3430
3431 enum qede_unload_mode {
3432         QEDE_UNLOAD_NORMAL,
3433 };
3434
3435 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
3436 {
3437         struct qed_link_params link_params;
3438         int rc;
3439
3440         DP_INFO(edev, "Starting qede unload\n");
3441
3442         mutex_lock(&edev->qede_lock);
3443         edev->state = QEDE_STATE_CLOSED;
3444
3445         /* Close OS Tx */
3446         netif_tx_disable(edev->ndev);
3447         netif_carrier_off(edev->ndev);
3448
3449         /* Reset the link */
3450         memset(&link_params, 0, sizeof(link_params));
3451         link_params.link_up = false;
3452         edev->ops->common->set_link(edev->cdev, &link_params);
3453         rc = qede_stop_queues(edev);
3454         if (rc) {
3455                 qede_sync_free_irqs(edev);
3456                 goto out;
3457         }
3458
3459         DP_INFO(edev, "Stopped Queues\n");
3460
3461         qede_vlan_mark_nonconfigured(edev);
3462         edev->ops->fastpath_stop(edev->cdev);
3463
3464         /* Release the interrupts */
3465         qede_sync_free_irqs(edev);
3466         edev->ops->common->set_fp_int(edev->cdev, 0);
3467
3468         qede_napi_disable_remove(edev);
3469
3470         qede_free_mem_load(edev);
3471         qede_free_fp_array(edev);
3472
3473 out:
3474         mutex_unlock(&edev->qede_lock);
3475         DP_INFO(edev, "Ending qede unload\n");
3476 }
3477
3478 enum qede_load_mode {
3479         QEDE_LOAD_NORMAL,
3480         QEDE_LOAD_RELOAD,
3481 };
3482
3483 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
3484 {
3485         struct qed_link_params link_params;
3486         struct qed_link_output link_output;
3487         int rc;
3488
3489         DP_INFO(edev, "Starting qede load\n");
3490
3491         rc = qede_set_num_queues(edev);
3492         if (rc)
3493                 goto err0;
3494
3495         rc = qede_alloc_fp_array(edev);
3496         if (rc)
3497                 goto err0;
3498
3499         qede_init_fp(edev);
3500
3501         rc = qede_alloc_mem_load(edev);
3502         if (rc)
3503                 goto err1;
3504         DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
3505                 QEDE_RSS_CNT(edev), edev->num_tc);
3506
3507         rc = qede_set_real_num_queues(edev);
3508         if (rc)
3509                 goto err2;
3510
3511         qede_napi_add_enable(edev);
3512         DP_INFO(edev, "Napi added and enabled\n");
3513
3514         rc = qede_setup_irqs(edev);
3515         if (rc)
3516                 goto err3;
3517         DP_INFO(edev, "Setup IRQs succeeded\n");
3518
3519         rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
3520         if (rc)
3521                 goto err4;
3522         DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
3523
3524         /* Add primary mac and set Rx filters */
3525         ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
3526
3527         mutex_lock(&edev->qede_lock);
3528         edev->state = QEDE_STATE_OPEN;
3529         mutex_unlock(&edev->qede_lock);
3530
3531         /* Program un-configured VLANs */
3532         qede_configure_vlan_filters(edev);
3533
3534         /* Ask for link-up using current configuration */
3535         memset(&link_params, 0, sizeof(link_params));
3536         link_params.link_up = true;
3537         edev->ops->common->set_link(edev->cdev, &link_params);
3538
3539         /* Query whether the link is already up */
3540         memset(&link_output, 0, sizeof(link_output));
3541         edev->ops->common->get_link(edev->cdev, &link_output);
3542         qede_link_update(edev, &link_output);
3543
3544         DP_INFO(edev, "qede load finished successfully\n");
3545
3546         return 0;
3547
3548 err4:
3549         qede_sync_free_irqs(edev);
             /* Zero the whole int_info struct; memsetting from &msix_cnt with
              * the full struct size would write past the end of the structure.
              */
3550         memset(&edev->int_info, 0, sizeof(struct qed_int_info));
3551 err3:
3552         qede_napi_disable_remove(edev);
3553 err2:
3554         qede_free_mem_load(edev);
3555 err1:
3556         edev->ops->common->set_fp_int(edev->cdev, 0);
3557         qede_free_fp_array(edev);
3558         edev->num_rss = 0;
3559 err0:
3560         return rc;
3561 }
3562
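     /* Bring the device down, let @func adjust parameters while the device is
      * quiesced, then load it again and re-apply the Rx mode (unload drops the
      * configured filters). E.g. the MTU-change path passes a callback that
      * updates the stored MTU while the device is down.
      */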
3563 void qede_reload(struct qede_dev *edev,
3564                  void (*func)(struct qede_dev *, union qede_reload_args *),
3565                  union qede_reload_args *args)
3566 {
3567         qede_unload(edev, QEDE_UNLOAD_NORMAL);
3568         /* Call function handler to update parameters
3569          * needed for function load.
3570          */
3571         if (func)
3572                 func(edev, args);
3573
3574         qede_load(edev, QEDE_LOAD_RELOAD);
3575
3576         mutex_lock(&edev->qede_lock);
3577         qede_config_rx_mode(edev->ndev);
3578         mutex_unlock(&edev->qede_lock);
3579 }
3580
3581 /* called with rtnl_lock */
3582 static int qede_open(struct net_device *ndev)
3583 {
3584         struct qede_dev *edev = netdev_priv(ndev);
3585         int rc;
3586
3587         netif_carrier_off(ndev);
3588
3589         edev->ops->common->set_power_state(edev->cdev, PCI_D0);
3590
3591         rc = qede_load(edev, QEDE_LOAD_NORMAL);
3592
3593         if (rc)
3594                 return rc;
3595
3596         udp_tunnel_get_rx_info(ndev);
3597
3598         return 0;
3599 }
3600
3601 static int qede_close(struct net_device *ndev)
3602 {
3603         struct qede_dev *edev = netdev_priv(ndev);
3604
3605         qede_unload(edev, QEDE_UNLOAD_NORMAL);
3606
3607         return 0;
3608 }
3609
3610 static void qede_link_update(void *dev, struct qed_link_output *link)
3611 {
3612         struct qede_dev *edev = dev;
3613
3614         if (!netif_running(edev->ndev)) {
3615                 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
3616                 return;
3617         }
3618
3619         if (link->link_up) {
3620                 if (!netif_carrier_ok(edev->ndev)) {
3621                         DP_NOTICE(edev, "Link is up\n");
3622                         netif_tx_start_all_queues(edev->ndev);
3623                         netif_carrier_on(edev->ndev);
3624                 }
3625         } else {
3626                 if (netif_carrier_ok(edev->ndev)) {
3627                         DP_NOTICE(edev, "Link is down\n");
3628                         netif_tx_disable(edev->ndev);
3629                         netif_carrier_off(edev->ndev);
3630                 }
3631         }
3632 }
3633
3634 static int qede_set_mac_addr(struct net_device *ndev, void *p)
3635 {
3636         struct qede_dev *edev = netdev_priv(ndev);
3637         struct sockaddr *addr = p;
3638         int rc;
3639
3640         ASSERT_RTNL(); /* @@@TBD To be removed */
3641
3642         DP_INFO(edev, "Set_mac_addr called\n");
3643
3644         if (!is_valid_ether_addr(addr->sa_data)) {
3645                 DP_NOTICE(edev, "The MAC address is not valid\n");
3646                 return -EFAULT;
3647         }
3648
3649         if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
3650                 DP_NOTICE(edev, "qed prevents setting MAC\n");
3651                 return -EINVAL;
3652         }
3653
3654         ether_addr_copy(ndev->dev_addr, addr->sa_data);
3655
3656         if (!netif_running(ndev)) {
3657                 DP_NOTICE(edev, "The device is currently down\n");
3658                 return 0;
3659         }
3660
3661         /* Remove the previous primary mac */
3662         rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
3663                                    edev->primary_mac);
3664         if (rc)
3665                 return rc;
3666
3667         /* Add MAC filter according to the new unicast HW MAC address */
3668         ether_addr_copy(edev->primary_mac, ndev->dev_addr);
3669         return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
3670                                       edev->primary_mac);
3671 }
3672
3673 static int
3674 qede_configure_mcast_filtering(struct net_device *ndev,
3675                                enum qed_filter_rx_mode_type *accept_flags)
3676 {
3677         struct qede_dev *edev = netdev_priv(ndev);
3678         unsigned char *mc_macs, *temp;
3679         struct netdev_hw_addr *ha;
3680         int rc = 0, mc_count;
3681         size_t size;
3682
3683         size = 64 * ETH_ALEN;
3684
3685         mc_macs = kzalloc(size, GFP_KERNEL);
3686         if (!mc_macs) {
3687                 DP_NOTICE(edev,
3688                           "Failed to allocate memory for multicast MACs\n");
3689                 rc = -ENOMEM;
3690                 goto exit;
3691         }
3692
3693         temp = mc_macs;
3694
3695         /* Remove all previously configured MAC filters */
3696         rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
3697                                    mc_macs, 1);
3698         if (rc)
3699                 goto exit;
3700
3701         netif_addr_lock_bh(ndev);
3702
3703         mc_count = netdev_mc_count(ndev);
3704         if (mc_count <= 64) { /* the buffer holds exactly 64 addresses */
3705                 netdev_for_each_mc_addr(ha, ndev) {
3706                         ether_addr_copy(temp, ha->addr);
3707                         temp += ETH_ALEN;
3708                 }
3709         }
3710
3711         netif_addr_unlock_bh(ndev);
3712
3713         /* Check for all multicast @@@TBD resource allocation */
3714         if ((ndev->flags & IFF_ALLMULTI) ||
3715             (mc_count > 64)) {
3716                 if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
3717                         *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
3718         } else {
3719                 /* Add all multicast MAC filters */
3720                 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
3721                                            mc_macs, mc_count);
3722         }
3723
3724 exit:
3725         kfree(mc_macs);
3726         return rc;
3727 }
3728
3729 static void qede_set_rx_mode(struct net_device *ndev)
3730 {
3731         struct qede_dev *edev = netdev_priv(ndev);
3732
3733         DP_INFO(edev, "qede_set_rx_mode called\n");
3734
3735         if (edev->state != QEDE_STATE_OPEN) {
3736                 DP_INFO(edev,
3737                         "qede_set_rx_mode called while interface is down\n");
3738         } else {
3739                 set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
3740                 schedule_delayed_work(&edev->sp_task, 0);
3741         }
3742 }
3743
3744 /* Must be called with qede_lock held */
3745 static void qede_config_rx_mode(struct net_device *ndev)
3746 {
3747         enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
3748         struct qede_dev *edev = netdev_priv(ndev);
3749         struct qed_filter_params rx_mode;
3750         unsigned char *uc_macs, *temp;
3751         struct netdev_hw_addr *ha;
3752         int rc, uc_count;
3753         size_t size;
3754
3755         netif_addr_lock_bh(ndev);
3756
3757         uc_count = netdev_uc_count(ndev);
3758         size = uc_count * ETH_ALEN;
3759
3760         uc_macs = kzalloc(size, GFP_ATOMIC);
3761         if (!uc_macs) {
3762                 DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
3763                 netif_addr_unlock_bh(ndev);
3764                 return;
3765         }
3766
3767         temp = uc_macs;
3768         netdev_for_each_uc_addr(ha, ndev) {
3769                 ether_addr_copy(temp, ha->addr);
3770                 temp += ETH_ALEN;
3771         }
3772
3773         netif_addr_unlock_bh(ndev);
3774
3775         /* Configure the struct for the Rx mode */
3776         memset(&rx_mode, 0, sizeof(struct qed_filter_params));
3777         rx_mode.type = QED_FILTER_TYPE_RX_MODE;
3778
3779         /* Remove all previous unicast secondary macs and multicast macs
3780          * (configure / leave the primary mac)
3781          */
3782         rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
3783                                    edev->primary_mac);
3784         if (rc)
3785                 goto out;
3786
3787         /* Check for promiscuous */
3788         if ((ndev->flags & IFF_PROMISC) ||
3789             (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
3790                 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
3791         } else {
3792                 /* Add MAC filters according to the unicast secondary macs */
3793                 int i;
3794
3795                 temp = uc_macs;
3796                 for (i = 0; i < uc_count; i++) {
3797                         rc = qede_set_ucast_rx_mac(edev,
3798                                                    QED_FILTER_XCAST_TYPE_ADD,
3799                                                    temp);
3800                         if (rc)
3801                                 goto out;
3802
3803                         temp += ETH_ALEN;
3804                 }
3805
3806                 rc = qede_configure_mcast_filtering(ndev, &accept_flags);
3807                 if (rc)
3808                         goto out;
3809         }
3810
3811         /* take care of VLAN mode */
3812         if (ndev->flags & IFF_PROMISC) {
3813                 qede_config_accept_any_vlan(edev, true);
3814         } else if (!edev->non_configured_vlans) {
3815                 /* It's possible that accept_any_vlan mode is set due to a
3816                  * previous setting of IFF_PROMISC. If vlan credits are
3817                  * sufficient, disable accept_any_vlan.
3818                  */
3819                 qede_config_accept_any_vlan(edev, false);
3820         }
3821
3822         rx_mode.filter.accept_flags = accept_flags;
3823         edev->ops->filter_config(edev->cdev, &rx_mode);
3824 out:
3825         kfree(uc_macs);
3826 }