ixgbevf: Fix ordering of shutdown to correctly disable Rx and Tx
[cascardo/linux.git] / drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 62a0d8e..a4b3d66 100644
@@ -1584,6 +1584,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                       reg_idx);
 }
 
+static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vfmrqc = 0, vfreta = 0;
+       u32 rss_key[10];
+       u16 rss_i = adapter->num_rx_queues;
+       int i, j;
+
+       /* Fill out hash function seeds */
+       netdev_rss_key_fill(rss_key, sizeof(rss_key));
+       for (i = 0; i < 10; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+       /* Fill out redirection table */
+       for (i = 0, j = 0; i < 64; i++, j++) {
+               if (j == rss_i)
+                       j = 0;
+               vfreta = (vfreta << 8) | (j * 0x1);
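+               /* each VFRETA register packs four one-byte entries */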
+               if ((i & 3) == 3)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+       }
+
+       /* Perform hash on these packet types */
+       vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
+               IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
+               IXGBE_VFMRQC_RSS_FIELD_IPV6 |
+               IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
+
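+       /* enable RSS */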
+       vfmrqc |= IXGBE_VFMRQC_RSSEN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
+}
+
 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
 {
@@ -1640,6 +1673,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        ixgbevf_setup_psrtype(adapter);
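+       /* VF RSS registers are only present on x550 and newer */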
+       if (hw->mac.type >= ixgbe_mac_X550_vf)
+               ixgbevf_setup_vfmrqc(adapter);
 
        /* notify the PF of our intent to use this size of frame */
        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1829,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned int def_q = 0;
        unsigned int num_tcs = 0;
-       unsigned int num_rx_queues = 1;
+       unsigned int num_rx_queues = adapter->num_rx_queues;
+       unsigned int num_tx_queues = adapter->num_tx_queues;
        int err;
 
        spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1844,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
                return err;
 
        if (num_tcs > 1) {
+               /* we need only one Tx queue */
+               num_tx_queues = 1;
+
                /* update default Tx ring register index */
                adapter->tx_ring[0]->reg_idx = def_q;
 
@@ -1816,7 +1855,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
        }
 
        /* if we have a bad config abort request queue reset */
-       if (adapter->num_rx_queues != num_rx_queues) {
+       if ((adapter->num_rx_queues != num_rx_queues) ||
+           (adapter->num_tx_queues != num_tx_queues)) {
                /* force mailbox timeout to prevent further messages */
                hw->mbx.timeout = 0;
 
@@ -1917,6 +1957,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
 
+       /* clear any pending interrupts, may auto mask */
+       IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       ixgbevf_irq_enable(adapter);
+
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
 
@@ -1929,16 +1973,9 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-
        ixgbevf_configure(adapter);
 
        ixgbevf_up_complete(adapter);
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_VTEICR);
-
-       ixgbevf_irq_enable(adapter);
 }
 
 /**
@@ -2045,17 +2082,20 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
 
-       netif_tx_disable(netdev);
-
-       msleep(10);
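+       /* brief pause to let the disabled Rx queues quiesce */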
+       usleep_range(10000, 20000);
 
        netif_tx_stop_all_queues(netdev);
 
+       /* call carrier off first to avoid false dev_watchdog timeouts */
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+
        ixgbevf_irq_disable(adapter);
 
        ixgbevf_napi_disable_all(adapter);
 
        del_timer_sync(&adapter->watchdog_timer);
+
        /* can't call flush scheduled work here because it can deadlock
         * if linkwatch_event tries to acquire the rtnl_lock which we are
         * holding */
@@ -2070,8 +2110,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
                                IXGBE_TXDCTL_SWFLSH);
        }
 
-       netif_carrier_off(netdev);
-
        if (!pci_channel_offline(adapter->pdev))
                ixgbevf_reset(adapter);
 
@@ -2181,8 +2219,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
                return;
 
        /* we need as many queues as traffic classes */
-       if (num_tcs > 1)
+       if (num_tcs > 1) {
                adapter->num_rx_queues = num_tcs;
+       } else {
+               u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
+
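+               /* multiple Rx/Tx queues require mailbox API 1.1 or newer */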
+               switch (hw->api_version) {
+               case ixgbe_mbox_api_11:
+                       adapter->num_rx_queues = rss;
+                       adapter->num_tx_queues = rss;
+               default:
+                       break;
+               }
+       }
 }
 
 /**
@@ -2944,10 +2993,6 @@ static int ixgbevf_open(struct net_device *netdev)
        if (!adapter->num_msix_vectors)
                return -ENOMEM;
 
-       /* disallow open during test */
-       if (test_bit(__IXGBEVF_TESTING, &adapter->state))
-               return -EBUSY;
-
        if (hw->adapter_stopped) {
                ixgbevf_reset(adapter);
                /* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3005,12 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
+       /* disallow open during test */
+       if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+               return -EBUSY;
+
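+       /* start with the carrier off; the watchdog brings it up with link */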
+       netif_carrier_off(netdev);
+
        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
@@ -2979,15 +3030,11 @@ static int ixgbevf_open(struct net_device *netdev)
         */
        ixgbevf_map_rings_to_vectors(adapter);
 
-       ixgbevf_up_complete(adapter);
-
-       /* clear any pending interrupts, may auto mask */
-       IXGBE_READ_REG(hw, IXGBE_VTEICR);
        err = ixgbevf_request_irq(adapter);
        if (err)
                goto err_req_irq;
 
-       ixgbevf_irq_enable(adapter);
+       ixgbevf_up_complete(adapter);
 
        return 0;
 
@@ -3099,7 +3146,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -3156,7 +3203,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
-               switch (skb->protocol) {
+               switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3452,8 +3499,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb);
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
@@ -3822,11 +3869,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                           NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER;
 
-       netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
-       netdev->vlan_features |= NETIF_F_IP_CSUM;
-       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-       netdev->vlan_features |= NETIF_F_SG;
+       netdev->vlan_features |= NETIF_F_TSO |
+                                NETIF_F_TSO6 |
+                                NETIF_F_IP_CSUM |
+                                NETIF_F_IPV6_CSUM |
+                                NETIF_F_SG;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;