Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9544e4c..cd23a29 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -222,7 +222,7 @@ int __init qede_init(void)
 {
        int ret;
 
-       pr_notice("qede_init: %s\n", version);
+       pr_info("qede_init: %s\n", version);
 
        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
@@ -253,7 +253,8 @@ int __init qede_init(void)
 
 static void __exit qede_cleanup(void)
 {
-       pr_notice("qede_cleanup called\n");
+       if (debug & QED_LOG_INFO_MASK)
+               pr_info("qede_cleanup called\n");
 
        unregister_netdevice_notifier(&qede_netdev_notifier);
        pci_unregister_driver(&qede_pci_driver);
@@ -270,8 +271,7 @@ module_exit(qede_cleanup);
 
 /* Unmap the data and free skb */
 static int qede_free_tx_pkt(struct qede_dev *edev,
-                           struct qede_tx_queue *txq,
-                           int *len)
+                           struct qede_tx_queue *txq, int *len)
 {
        u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
        struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -329,8 +329,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                                    struct qede_tx_queue *txq,
                                    struct eth_tx_1st_bd *first_bd,
-                                   int nbd,
-                                   bool data_split)
+                                   int nbd, bool data_split)
 {
        u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
        struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
@@ -339,8 +338,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
        /* Return prod to its position before this skb was handled */
        qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod),
-                          first_bd);
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
 
        first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
 
@@ -366,8 +364,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
        /* Again return prod to its position before this skb was handled */
        qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod),
-                          first_bd);
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
 
        /* Free skb */
        dev_kfree_skb_any(skb);
@@ -376,8 +373,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 }
 
 static u32 qede_xmit_type(struct qede_dev *edev,
-                         struct sk_buff *skb,
-                         int *ipv6_ext)
+                         struct sk_buff *skb, int *ipv6_ext)
 {
        u32 rc = XMIT_L4_CSUM;
        __be16 l3_proto;
@@ -434,15 +430,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
 }
 
 static int map_frag_to_bd(struct qede_dev *edev,
-                         skb_frag_t *frag,
-                         struct eth_tx_bd *bd)
+                         skb_frag_t *frag, struct eth_tx_bd *bd)
 {
        dma_addr_t mapping;
 
        /* Map skb non-linear frag data for DMA */
        mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
-                                  skb_frag_size(frag),
-                                  DMA_TO_DEVICE);
+                                  skb_frag_size(frag), DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
                return -ENOMEM;
@@ -504,9 +498,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
 }
 
 /* Main transmit function */
-static
-netdev_tx_t qede_start_xmit(struct sk_buff *skb,
-                           struct net_device *ndev)
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+                                  struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
        struct netdev_queue *netdev_txq;
@@ -526,12 +519,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
        /* Get tx-queue context and netdev index */
        txq_index = skb_get_queue_mapping(skb);
-       WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+       WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
        txq = QEDE_TX_QUEUE(edev, txq_index);
        netdev_txq = netdev_get_tx_queue(ndev, txq_index);
 
-       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
-                              (MAX_SKB_FRAGS + 1));
+       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
 
        xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
 
@@ -606,6 +598,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                            1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
                }
 
+               /* Legacy FW had inverted behavior for this bit -
+                * i.e., it needed to be set to prevent the FW from touching
+                * encapsulated packets when it didn't need to.
+                */
+               if (unlikely(txq->is_legacy))
+                       first_bd->data.bitfields ^=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
                /* If the packet is IPv6 with an extension header, indicate
                 * that to the FW and pass a few params, since the device
                 * cracker doesn't support parsing IPv6 with extension header(s).
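
For reference, the XOR above inverts the tunnel flag regardless of its prior state, which is exactly what the legacy-FW quirk needs: if the flag was already set it is cleared, and vice versa. A minimal standalone sketch, with an illustrative stand-in for the real ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT value:

#include <stdint.h>

#define EX_TUNN_FLAG_SHIFT 6	/* hypothetical stand-in for the real shift */

/* Invert the tunnel flag for legacy FW: set it if clear, clear it if set. */
static uint16_t legacy_fixup(uint16_t bitfields)
{
	return bitfields ^ (1 << EX_TUNN_FLAG_SHIFT);
}
/* legacy_fixup(0) -> flag set; legacy_fixup(1 << EX_TUNN_FLAG_SHIFT) -> cleared. */
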
@@ -731,6 +731,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                        qede_update_tx_producer(txq);
 
                netif_tx_stop_queue(netdev_txq);
+               txq->stopped_cnt++;
                DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                           "Stop queue was called\n");
                /* paired memory barrier is in qede_tx_int(), we have to keep
@@ -764,8 +765,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
        return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
 }
 
-static int qede_tx_int(struct qede_dev *edev,
-                      struct qede_tx_queue *txq)
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        struct netdev_queue *netdev_txq;
        u16 hw_bd_cons;
@@ -791,6 +791,7 @@ static int qede_tx_int(struct qede_dev *edev,
                bytes_compl += len;
                pkts_compl++;
                txq->sw_tx_cons++;
+               txq->xmit_pkts++;
        }
 
        netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
@@ -963,8 +964,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
 
 static u32 qede_get_rxhash(struct qede_dev *edev,
                           u8 bitfields,
-                          __le32 rss_hash,
-                          enum pkt_hash_types *rxhash_type)
+                          __le32 rss_hash, enum pkt_hash_types *rxhash_type)
 {
        enum rss_hash_type htype;
 
@@ -993,12 +993,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 static inline void qede_skb_receive(struct qede_dev *edev,
                                    struct qede_fastpath *fp,
-                                   struct sk_buff *skb,
-                                   u16 vlan_tag)
+                                   struct sk_buff *skb, u16 vlan_tag)
 {
        if (vlan_tag)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                      vlan_tag);
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
        napi_gro_receive(&fp->napi, skb);
 }
@@ -1021,8 +1019,7 @@ static void qede_set_gro_params(struct qede_dev *edev,
 
 static int qede_fill_frag_skb(struct qede_dev *edev,
                              struct qede_rx_queue *rxq,
-                             u8 tpa_agg_index,
-                             u16 len_on_bd)
+                             u8 tpa_agg_index, u16 len_on_bd)
 {
        struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
                                                         NUM_RX_BDS_MAX];
@@ -1209,7 +1206,7 @@ static void qede_gro_receive(struct qede_dev *edev,
 #endif
 
 send_skb:
-       skb_record_rx_queue(skb, fp->rss_id);
+       skb_record_rx_queue(skb, fp->rxq->rxq_id);
        qede_skb_receive(edev, fp, skb, vlan_tag);
 }
 
@@ -1413,7 +1410,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
                if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
                        edev->ops->eth_cqe_completion(
-                                       edev->cdev, fp->rss_id,
+                                       edev->cdev, fp->id,
                                        (struct eth_slow_path_rx_cqe *)cqe);
                        goto next_cqe;
                }
@@ -1470,7 +1467,7 @@ alloc_skb:
                skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        DP_NOTICE(edev,
-                                 "Build_skb failed, dropping incoming packet\n");
+                                 "skb allocation failed, dropping incoming packet\n");
                        qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
                        rxq->rx_alloc_errors++;
                        goto next_cqe;
@@ -1578,14 +1575,13 @@ alloc_skb:
                skb->protocol = eth_type_trans(skb, edev->ndev);
 
                rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
-                                         fp_cqe->rss_hash,
-                                         &rxhash_type);
+                                         fp_cqe->rss_hash, &rxhash_type);
 
                skb_set_hash(skb, rx_hash, rxhash_type);
 
                qede_set_skb_csum(skb, csum_flag);
 
-               skb_record_rx_queue(skb, fp->rss_id);
+               skb_record_rx_queue(skb, fp->rxq->rxq_id);
 
                qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
 next_rx_only:
@@ -1604,6 +1600,8 @@ next_cqe: /* don't consume bd rx buffer */
        /* Update producers */
        qede_update_rx_prod(edev, rxq);
 
+       rxq->rcv_pkts += rx_pkt;
+
        return rx_pkt;
 }
 
@@ -1616,10 +1614,12 @@ static int qede_poll(struct napi_struct *napi, int budget)
        u8 tc;
 
        for (tc = 0; tc < edev->num_tc; tc++)
-               if (qede_txq_has_work(&fp->txqs[tc]))
+               if (likely(fp->type & QEDE_FASTPATH_TX) &&
+                   qede_txq_has_work(&fp->txqs[tc]))
                        qede_tx_int(edev, &fp->txqs[tc]);
 
-       rx_work_done = qede_has_rx_work(fp->rxq) ?
+       rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+                       qede_has_rx_work(fp->rxq)) ?
                        qede_rx_int(fp, budget) : 0;
        if (rx_work_done < budget) {
                qed_sb_update_sb_idx(fp->sb_info);
@@ -1639,8 +1639,10 @@ static int qede_poll(struct napi_struct *napi, int budget)
                rmb();
 
                /* Fall out from the NAPI loop if needed */
-               if (!(qede_has_rx_work(fp->rxq) ||
-                     qede_has_tx_work(fp))) {
+               if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
+                      qede_has_rx_work(fp->rxq)) ||
+                     (likely(fp->type & QEDE_FASTPATH_TX) &&
+                      qede_has_tx_work(fp)))) {
                        napi_complete(napi);
 
                        /* Update and reenable interrupts */
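
The fp->type tests above rely on QEDE_FASTPATH_RX/TX being single-bit flags, with combined fastpaths carrying both bits; a hedged sketch of that pattern (only the names appear in this diff, the concrete values live in qede.h and are assumed here):

#define QEDE_FASTPATH_TX	(1 << 0)	/* assumed value */
#define QEDE_FASTPATH_RX	(1 << 1)	/* assumed value */
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

/* A combined fastpath satisfies both checks in qede_poll(). */
static int fp_has_rx(unsigned int type) { return !!(type & QEDE_FASTPATH_RX); }
static int fp_has_tx(unsigned int type) { return !!(type & QEDE_FASTPATH_TX); }
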
@@ -1711,6 +1713,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 
        edev->ops->get_vport_stats(edev->cdev, &stats);
        edev->stats.no_buff_discards = stats.no_buff_discards;
+       edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
+       edev->stats.ttl0_discard = stats.ttl0_discard;
        edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
        edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
        edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
@@ -1790,9 +1794,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
        edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
 }
 
-static struct rtnl_link_stats64 *qede_get_stats64(
-                           struct net_device *dev,
-                           struct rtnl_link_stats64 *stats)
+static
+struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
+                                          struct rtnl_link_stats64 *stats)
 {
        struct qede_dev *edev = netdev_priv(dev);
 
@@ -2106,14 +2110,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
                }
 
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "marked vlan %d as non-configured\n",
-                          vlan->vid);
+                          "marked vlan %d as non-configured\n", vlan->vid);
        }
 
        edev->accept_any_vlan = false;
 }
 
-int qede_set_features(struct net_device *dev, netdev_features_t features)
+static int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
@@ -2149,7 +2152,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
 
                edev->vxlan_dst_port = t_port;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                           t_port);
 
                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2160,7 +2163,7 @@ static void qede_udp_tunnel_add(struct net_device *dev,
 
                edev->geneve_dst_port = t_port;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
@@ -2184,7 +2187,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
 
                edev->vxlan_dst_port = 0;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);
 
                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
@@ -2195,7 +2198,7 @@ static void qede_udp_tunnel_del(struct net_device *dev,
 
                edev->geneve_dst_port = 0;
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
@@ -2240,15 +2243,13 @@ static const struct net_device_ops qede_netdev_ops = {
 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
-                                           u32 dp_module,
-                                           u8 dp_level)
+                                           u32 dp_module, u8 dp_level)
 {
        struct net_device *ndev;
        struct qede_dev *edev;
 
        ndev = alloc_etherdev_mqs(sizeof(*edev),
-                                 info->num_queues,
-                                 info->num_queues);
+                                 info->num_queues, info->num_queues);
        if (!ndev) {
                pr_err("etherdev allocation failed\n");
                return NULL;
@@ -2264,6 +2265,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
+       DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+               info->num_queues, info->num_queues);
+
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2352,7 +2356,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
                struct qede_fastpath *fp;
                int i;
 
-               for_each_rss(i) {
+               for_each_queue(i) {
                        fp = &edev->fp_array[i];
 
                        kfree(fp->sb_info);
@@ -2361,22 +2365,33 @@ static void qede_free_fp_array(struct qede_dev *edev)
                }
                kfree(edev->fp_array);
        }
-       edev->num_rss = 0;
+
+       edev->num_queues = 0;
+       edev->fp_num_tx = 0;
+       edev->fp_num_rx = 0;
 }
 
 static int qede_alloc_fp_array(struct qede_dev *edev)
 {
+       u8 fp_combined, fp_rx = edev->fp_num_rx;
        struct qede_fastpath *fp;
        int i;
 
-       edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+       edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
                                 sizeof(*edev->fp_array), GFP_KERNEL);
        if (!edev->fp_array) {
                DP_NOTICE(edev, "fp array allocation failed\n");
                goto err;
        }
 
-       for_each_rss(i) {
+       fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+       /* Allocate the FP elements for Rx queues followed by combined and then
+        * the Tx. This ordering should be maintained so that the respective
+        * queues (Rx or Tx) will be together in the fastpath array and the
+        * associated ids will be sequential.
+        */
+       for_each_queue(i) {
                fp = &edev->fp_array[i];
 
                fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
@@ -2385,16 +2400,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
                        goto err;
                }
 
-               fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
-               if (!fp->rxq) {
-                       DP_NOTICE(edev, "RXQ struct allocation failed\n");
-                       goto err;
+               if (fp_rx) {
+                       fp->type = QEDE_FASTPATH_RX;
+                       fp_rx--;
+               } else if (fp_combined) {
+                       fp->type = QEDE_FASTPATH_COMBINED;
+                       fp_combined--;
+               } else {
+                       fp->type = QEDE_FASTPATH_TX;
                }
 
-               fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
-               if (!fp->txqs) {
-                       DP_NOTICE(edev, "TXQ array allocation failed\n");
-                       goto err;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
+                                          GFP_KERNEL);
+                       if (!fp->txqs) {
+                               DP_NOTICE(edev,
+                                         "TXQ array allocation failed\n");
+                               goto err;
+                       }
+               }
+
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+                       if (!fp->rxq) {
+                               DP_NOTICE(edev,
+                                         "RXQ struct allocation failed\n");
+                               goto err;
+                       }
                }
        }
 
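A worked example of the type-assignment loop above, under hypothetical counts - 8 queues total, 2 Rx-only and 2 Tx-only, so fp_combined comes out as 4:

enum fp_type { FP_RX = 1, FP_TX = 2, FP_COMBINED = FP_RX | FP_TX };

static void assign_types(enum fp_type *fp, int total, int n_rx, int n_tx)
{
	int i, combined = total - n_rx - n_tx;

	for (i = 0; i < total; i++) {
		if (n_rx) {
			fp[i] = FP_RX;
			n_rx--;
		} else if (combined) {
			fp[i] = FP_COMBINED;
			combined--;
		} else {
			fp[i] = FP_TX;
		}
	}
	/* (total=8, n_rx=2, n_tx=2) -> RX RX COMB COMB COMB COMB TX TX:
	 * Rx-capable entries (0..5) and Tx-capable entries (2..7) are each
	 * contiguous, so rxq_id/txq index assignment stays sequential.
	 */
}
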
@@ -2456,7 +2488,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        bool is_vf, enum qede_probe_mode mode)
 {
        struct qed_probe_params probe_params;
-       struct qed_slowpath_params params;
+       struct qed_slowpath_params sp_params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
        struct qed_dev *cdev;
@@ -2479,14 +2511,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        qede_update_pf_params(cdev);
 
        /* Start the Slowpath-process */
-       memset(&params, 0, sizeof(struct qed_slowpath_params));
-       params.int_mode = QED_INT_MODE_MSIX;
-       params.drv_major = QEDE_MAJOR_VERSION;
-       params.drv_minor = QEDE_MINOR_VERSION;
-       params.drv_rev = QEDE_REVISION_VERSION;
-       params.drv_eng = QEDE_ENGINEERING_VERSION;
-       strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
-       rc = qed_ops->common->slowpath_start(cdev, &params);
+       memset(&sp_params, 0, sizeof(sp_params));
+       sp_params.int_mode = QED_INT_MODE_MSIX;
+       sp_params.drv_major = QEDE_MAJOR_VERSION;
+       sp_params.drv_minor = QEDE_MINOR_VERSION;
+       sp_params.drv_rev = QEDE_REVISION_VERSION;
+       sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
+       strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+       rc = qed_ops->common->slowpath_start(cdev, &sp_params);
        if (rc) {
                pr_notice("Cannot start slowpath\n");
                goto err1;
@@ -2590,7 +2622,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        qed_ops->common->slowpath_stop(cdev);
        qed_ops->common->remove(cdev);
 
-       pr_notice("Ending successfully qede_remove\n");
+       dev_info(&pdev->dev, "Ending qede_remove successfully\n");
 }
 
 static void qede_remove(struct pci_dev *pdev)
@@ -2609,8 +2641,8 @@ static int qede_set_num_queues(struct qede_dev *edev)
        u16 rss_num;
 
        /* Setup queues according to possible resources */
-       if (edev->req_rss)
-               rss_num = edev->req_rss;
+       if (edev->req_queues)
+               rss_num = edev->req_queues;
        else
                rss_num = netif_get_num_default_rss_queues() *
                          edev->dev_info.common.num_hwfns;
@@ -2620,11 +2652,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
        rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
        if (rc > 0) {
                /* Managed to request interrupts for our queues */
-               edev->num_rss = rc;
+               edev->num_queues = rc;
                DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
-                       QEDE_RSS_CNT(edev), rss_num);
+                       QEDE_QUEUE_CNT(edev), rss_num);
                rc = 0;
        }
+
+       edev->fp_num_tx = edev->req_num_tx;
+       edev->fp_num_rx = edev->req_num_rx;
+
        return rc;
 }
 
@@ -2638,16 +2674,14 @@ static void qede_free_mem_sb(struct qede_dev *edev,
 
 /* This function allocates fast-path status block memory */
 static int qede_alloc_mem_sb(struct qede_dev *edev,
-                            struct qed_sb_info *sb_info,
-                            u16 sb_id)
+                            struct qed_sb_info *sb_info, u16 sb_id)
 {
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;
 
        sb_virt = dma_alloc_coherent(&edev->pdev->dev,
-                                    sizeof(*sb_virt),
-                                    &sb_phys, GFP_KERNEL);
+                                    sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
@@ -2679,16 +2713,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
                data = rx_buf->data;
 
                dma_unmap_page(&edev->pdev->dev,
-                              rx_buf->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
 
                rx_buf->data = NULL;
                __free_page(data);
        }
 }
 
-static void qede_free_sge_mem(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq) {
+static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
        int i;
 
        if (edev->gro_disable)
@@ -2707,8 +2740,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
        }
 }
 
-static void qede_free_mem_rxq(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        qede_free_sge_mem(edev, rxq);
 
@@ -2730,9 +2762,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        struct page *data;
-       u16 rx_buf_size;
-
-       rx_buf_size = rxq->rx_buf_size;
 
        data = alloc_pages(GFP_ATOMIC, 0);
        if (unlikely(!data)) {
@@ -2767,8 +2796,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
        return 0;
 }
 
-static int qede_alloc_sge_mem(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
+static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        dma_addr_t mapping;
        int i;
@@ -2815,15 +2843,14 @@ err:
 }
 
 /* This function allocates all memory needed per Rx queue */
-static int qede_alloc_mem_rxq(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq)
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
-       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
-                          edev->ndev->mtu;
+       rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
        if (rxq->rx_buf_size > PAGE_SIZE)
                rxq->rx_buf_size = PAGE_SIZE;
 
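To illustrate the clamp above with hypothetical constants (NET_IP_ALIGN is arch-dependent and ETH_OVERHEAD is defined in qede.h; the values below are assumptions):

#define EX_NET_IP_ALIGN	2	/* assumed; 0 on some architectures */
#define EX_ETH_OVERHEAD	30	/* assumed: L2 header + VLAN + CRC */
#define EX_PAGE_SIZE	4096

static unsigned int ex_rx_buf_size(unsigned int mtu)
{
	unsigned int size = EX_NET_IP_ALIGN + EX_ETH_OVERHEAD + mtu;

	/* mtu=1500 -> 1532 fits in one page; mtu=9000 -> clamped to 4096 */
	return size > EX_PAGE_SIZE ? EX_PAGE_SIZE : size;
}
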
@@ -2877,8 +2904,7 @@ err:
        return rc;
 }
 
-static void qede_free_mem_txq(struct qede_dev *edev,
-                             struct qede_tx_queue *txq)
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        /* Free the parallel SW ring */
        kfree(txq->sw_tx_ring);
@@ -2888,8 +2914,7 @@ static void qede_free_mem_txq(struct qede_dev *edev,
 }
 
 /* This function allocates all memory needed per Tx queue */
-static int qede_alloc_mem_txq(struct qede_dev *edev,
-                             struct qede_tx_queue *txq)
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        int size, rc;
        union eth_tx_bd_types *p_virt;
@@ -2921,41 +2946,45 @@ err:
 }
 
 /* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *edev,
-                            struct qede_fastpath *fp)
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
        int tc;
 
        qede_free_mem_sb(edev, fp->sb_info);
 
-       qede_free_mem_rxq(edev, fp->rxq);
+       if (fp->type & QEDE_FASTPATH_RX)
+               qede_free_mem_rxq(edev, fp->rxq);
 
-       for (tc = 0; tc < edev->num_tc; tc++)
-               qede_free_mem_txq(edev, &fp->txqs[tc]);
+       if (fp->type & QEDE_FASTPATH_TX)
+               for (tc = 0; tc < edev->num_tc; tc++)
+                       qede_free_mem_txq(edev, &fp->txqs[tc]);
 }
 
 /* This function allocates all memory needed for a single fp (i.e. an entity
- * which contains status block, one rx queue and multiple per-TC tx queues.
+ * which contains status block, one rx queue and/or multiple per-TC tx queues.
  */
-static int qede_alloc_mem_fp(struct qede_dev *edev,
-                            struct qede_fastpath *fp)
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
        int rc, tc;
 
-       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
-       if (rc)
-               goto err;
-
-       rc = qede_alloc_mem_rxq(edev, fp->rxq);
+       rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
        if (rc)
                goto err;
 
-       for (tc = 0; tc < edev->num_tc; tc++) {
-               rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+       if (fp->type & QEDE_FASTPATH_RX) {
+               rc = qede_alloc_mem_rxq(edev, fp->rxq);
                if (rc)
                        goto err;
        }
 
+       if (fp->type & QEDE_FASTPATH_TX) {
+               for (tc = 0; tc < edev->num_tc; tc++) {
+                       rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+                       if (rc)
+                               goto err;
+               }
+       }
+
        return 0;
 err:
        return rc;
@@ -2965,7 +2994,7 @@ static void qede_free_mem_load(struct qede_dev *edev)
 {
        int i;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
 
                qede_free_mem_fp(edev, fp);
@@ -2975,16 +3004,16 @@ static void qede_free_mem_load(struct qede_dev *edev)
 /* This function allocates all qede memory at NIC load. */
 static int qede_alloc_mem_load(struct qede_dev *edev)
 {
-       int rc = 0, rss_id;
+       int rc = 0, queue_id;
 
-       for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
-               struct qede_fastpath *fp = &edev->fp_array[rss_id];
+       for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+               struct qede_fastpath *fp = &edev->fp_array[queue_id];
 
                rc = qede_alloc_mem_fp(edev, fp);
                if (rc) {
                        DP_ERR(edev,
                               "Failed to allocate memory for fastpath - rss id = %d\n",
-                              rss_id);
+                              queue_id);
                        qede_free_mem_load(edev);
                        return rc;
                }
@@ -2996,30 +3025,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
 static void qede_init_fp(struct qede_dev *edev)
 {
-       int rss_id, txq_index, tc;
+       int queue_id, rxq_index = 0, txq_index = 0, tc;
        struct qede_fastpath *fp;
 
-       for_each_rss(rss_id) {
-               fp = &edev->fp_array[rss_id];
+       for_each_queue(queue_id) {
+               fp = &edev->fp_array[queue_id];
 
                fp->edev = edev;
-               fp->rss_id = rss_id;
+               fp->id = queue_id;
 
                memset((void *)&fp->napi, 0, sizeof(fp->napi));
 
                memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
 
-               memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
-               fp->rxq->rxq_id = rss_id;
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+                       fp->rxq->rxq_id = rxq_index++;
+               }
 
-               memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
-                       fp->txqs[tc].index = txq_index;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       memset((void *)fp->txqs, 0,
+                              (edev->num_tc * sizeof(*fp->txqs)));
+                       for (tc = 0; tc < edev->num_tc; tc++) {
+                               fp->txqs[tc].index = txq_index +
+                                   tc * QEDE_TSS_COUNT(edev);
+                               if (edev->dev_info.is_legacy)
+                                       fp->txqs[tc].is_legacy = true;
+                       }
+                       txq_index++;
                }
 
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
-                        edev->ndev->name, rss_id);
+                        edev->ndev->name, queue_id);
        }
 
        edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
@@ -3029,12 +3066,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev)
 {
        int rc = 0;
 
-       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+       rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
        if (rc) {
                DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
                return rc;
        }
-       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+
+       rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
        if (rc) {
                DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
                return rc;
@@ -3047,7 +3085,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev)
 {
        int i;
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                napi_disable(&edev->fp_array[i].napi);
 
                netif_napi_del(&edev->fp_array[i].napi);
@@ -3059,7 +3097,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
        int i;
 
        /* Add NAPI objects */
-       for_each_rss(i) {
+       for_each_queue(i) {
                netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
                               qede_poll, NAPI_POLL_WEIGHT);
                napi_enable(&edev->fp_array[i].napi);
@@ -3088,14 +3126,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
        int i, rc;
 
        /* Sanitize number of interrupts == number of prepared RSS queues */
-       if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+       if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
                DP_ERR(edev,
                       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
-                      QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+                      QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
                return -EINVAL;
        }
 
-       for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+       for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
                rc = request_irq(edev->int_info.msix[i].vector,
                                 qede_msix_fp_int, 0, edev->fp_array[i].name,
                                 &edev->fp_array[i]);
@@ -3140,18 +3178,17 @@ static int qede_setup_irqs(struct qede_dev *edev)
 
                /* qed should learn to receive the RSS ids and callbacks */
                ops = edev->ops->common;
-               for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+               for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
                        ops->simd_handler_config(edev->cdev,
                                                 &edev->fp_array[i], i,
                                                 qede_simd_fp_handler);
-               edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+               edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
        }
        return 0;
 }
 
 static int qede_drain_txq(struct qede_dev *edev,
-                         struct qede_tx_queue *txq,
-                         bool allow_drain)
+                         struct qede_tx_queue *txq, bool allow_drain)
 {
        int rc, cnt = 1000;
 
@@ -3203,45 +3240,53 @@ static int qede_stop_queues(struct qede_dev *edev)
        }
 
        /* Flush Tx queues. If needed, request drain from MCP */
-       for_each_rss(i) {
+       for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
 
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qede_tx_queue *txq = &fp->txqs[tc];
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       for (tc = 0; tc < edev->num_tc; tc++) {
+                               struct qede_tx_queue *txq = &fp->txqs[tc];
 
-                       rc = qede_drain_txq(edev, txq, true);
-                       if (rc)
-                               return rc;
+                               rc = qede_drain_txq(edev, txq, true);
+                               if (rc)
+                                       return rc;
+                       }
                }
        }
 
-       /* Stop all Queues in reverse order*/
-       for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+       /* Stop all Queues in reverse order */
+       for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
                struct qed_stop_rxq_params rx_params;
 
-               /* Stop the Tx Queue(s)*/
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qed_stop_txq_params tx_params;
-
-                       tx_params.rss_id = i;
-                       tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
-                       rc = edev->ops->q_tx_stop(cdev, &tx_params);
-                       if (rc) {
-                               DP_ERR(edev, "Failed to stop TXQ #%d\n",
-                                      tx_params.tx_queue_id);
-                               return rc;
+               /* Stop the Tx Queue(s) */
+               if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+                       for (tc = 0; tc < edev->num_tc; tc++) {
+                               struct qed_stop_txq_params tx_params;
+                               u8 val;
+
+                               tx_params.rss_id = i;
+                               val = edev->fp_array[i].txqs[tc].index;
+                               tx_params.tx_queue_id = val;
+                               rc = edev->ops->q_tx_stop(cdev, &tx_params);
+                               if (rc) {
+                                       DP_ERR(edev, "Failed to stop TXQ #%d\n",
+                                              tx_params.tx_queue_id);
+                                       return rc;
+                               }
                        }
                }
 
-               /* Stop the Rx Queue*/
-               memset(&rx_params, 0, sizeof(rx_params));
-               rx_params.rss_id = i;
-               rx_params.rx_queue_id = i;
+               /* Stop the Rx Queue */
+               if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+                       memset(&rx_params, 0, sizeof(rx_params));
+                       rx_params.rss_id = i;
+                       rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
 
-               rc = edev->ops->q_rx_stop(cdev, &rx_params);
-               if (rc) {
-                       DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
-                       return rc;
+                       rc = edev->ops->q_rx_stop(cdev, &rx_params);
+                       if (rc) {
+                               DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+                               return rc;
+                       }
                }
        }
 
@@ -3264,7 +3309,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
        struct qed_start_vport_params start = {0};
        bool reset_rss_indir = false;
 
-       if (!edev->num_rss) {
+       if (!edev->num_queues) {
                DP_ERR(edev,
                       "Cannot update V-VPORT as active as there are no Rx queues\n");
                return -EINVAL;
@@ -3288,50 +3333,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
                   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
 
-       for_each_rss(i) {
+       for_each_queue(i) {
                struct qede_fastpath *fp = &edev->fp_array[i];
-               dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
-
-               memset(&q_params, 0, sizeof(q_params));
-               q_params.rss_id = i;
-               q_params.queue_id = i;
-               q_params.vport_id = 0;
-               q_params.sb = fp->sb_info->igu_sb_id;
-               q_params.sb_idx = RX_PI;
-
-               rc = edev->ops->q_rx_start(cdev, &q_params,
-                                          fp->rxq->rx_buf_size,
-                                          fp->rxq->rx_bd_ring.p_phys_addr,
-                                          phys_table,
-                                          fp->rxq->rx_comp_ring.page_cnt,
-                                          &fp->rxq->hw_rxq_prod_addr);
-               if (rc) {
-                       DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
-                       return rc;
-               }
+               dma_addr_t p_phys_table;
+               u32 page_cnt;
+
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       struct qede_rx_queue *rxq = fp->rxq;
+                       __le16 *val;
+
+                       memset(&q_params, 0, sizeof(q_params));
+                       q_params.rss_id = i;
+                       q_params.queue_id = rxq->rxq_id;
+                       q_params.vport_id = 0;
+                       q_params.sb = fp->sb_info->igu_sb_id;
+                       q_params.sb_idx = RX_PI;
+
+                       p_phys_table =
+                           qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+                       page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+                       rc = edev->ops->q_rx_start(cdev, &q_params,
+                                                  rxq->rx_buf_size,
+                                                  rxq->rx_bd_ring.p_phys_addr,
+                                                  p_phys_table,
+                                                  page_cnt,
+                                                  &rxq->hw_rxq_prod_addr);
+                       if (rc) {
+                               DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+                                      rc);
+                               return rc;
+                       }
 
-               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+                       val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+                       rxq->hw_cons_ptr = val;
 
-               qede_update_rx_prod(edev, fp->rxq);
+                       qede_update_rx_prod(edev, rxq);
+               }
+
+               if (!(fp->type & QEDE_FASTPATH_TX))
+                       continue;
 
                for (tc = 0; tc < edev->num_tc; tc++) {
                        struct qede_tx_queue *txq = &fp->txqs[tc];
-                       int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+                       p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+                       page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
 
                        memset(&q_params, 0, sizeof(q_params));
                        q_params.rss_id = i;
-                       q_params.queue_id = txq_index;
+                       q_params.queue_id = txq->index;
                        q_params.vport_id = 0;
                        q_params.sb = fp->sb_info->igu_sb_id;
                        q_params.sb_idx = TX_PI(tc);
 
                        rc = edev->ops->q_tx_start(cdev, &q_params,
-                                                  txq->tx_pbl.pbl.p_phys_table,
-                                                  txq->tx_pbl.page_cnt,
+                                                  p_phys_table, page_cnt,
                                                   &txq->doorbell_addr);
                        if (rc) {
                                DP_ERR(edev, "Start TXQ #%d failed %d\n",
-                                      txq_index, rc);
+                                      txq->index, rc);
                                return rc;
                        }
 
@@ -3362,13 +3423,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
        }
 
        /* Fill struct with RSS params */
-       if (QEDE_RSS_CNT(edev) > 1) {
+       if (QEDE_RSS_COUNT(edev) > 1) {
                vport_update_params.update_rss_flg = 1;
 
                /* Need to validate current RSS config uses valid entries */
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        if (edev->rss_params.rss_ind_table[i] >=
-                           edev->num_rss) {
+                           QEDE_RSS_COUNT(edev)) {
                                reset_rss_indir = true;
                                break;
                        }
@@ -3381,7 +3442,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                                u16 indir_val;
 
-                               val = QEDE_RSS_CNT(edev);
+                               val = QEDE_RSS_COUNT(edev);
                                indir_val = ethtool_rxfh_indir_default(i, val);
                                edev->rss_params.rss_ind_table[i] = indir_val;
                        }
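
For context on the reset path above: in the kernel, ethtool_rxfh_indir_default(index, n_rx_rings) is simply index % n_rx_rings, so the table is refilled round-robin across the active Rx queues. A sketch, with an assumed 128-entry table standing in for QED_RSS_IND_TABLE_SIZE:

#include <stdint.h>

#define EX_IND_TABLE_SIZE 128	/* assumed size of QED_RSS_IND_TABLE_SIZE */

/* Refill the RSS indirection table round-robin over rx_count queues. */
static void ex_reset_indir(uint16_t *table, uint16_t rx_count)
{
	int i;

	for (i = 0; i < EX_IND_TABLE_SIZE; i++)
		table[i] = i % rx_count;	/* 4 queues: 0,1,2,3,0,1,... */
}
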
@@ -3510,7 +3571,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
        if (rc)
                goto err1;
        DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
-               QEDE_RSS_CNT(edev), edev->num_tc);
+               QEDE_QUEUE_CNT(edev), edev->num_tc);
 
        rc = qede_set_real_num_queues(edev);
        if (rc)
@@ -3563,7 +3624,9 @@ err2:
 err1:
        edev->ops->common->set_fp_int(edev->cdev, 0);
        qede_free_fp_array(edev);
-       edev->num_rss = 0;
+       edev->num_queues = 0;
+       edev->fp_num_tx = 0;
+       edev->fp_num_rx = 0;
 err0:
        return rc;
 }