mlx4: Fix tx ring affinity_mask creation
Author:     Benjamin Poirier <bpoirier@suse.de>
AuthorDate: Tue, 28 Apr 2015 21:49:29 +0000 (14:49 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Wed, 29 Apr 2015 19:16:57 +0000 (15:16 -0400)
By default, the number of tx queues is limited by the number of online cpus
in mlx4_en_get_profile(). However, this limit no longer holds after the
ethtool .set_channels method has been called. In that situation, the driver
may access invalid bits of certain cpumask variables when queue_index >=
nr_cpu_ids.
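
To illustrate (a minimal sketch, not part of the patch; the helper name
is hypothetical), the pre-fix pattern that becomes invalid once
queue_index can exceed nr_cpu_ids:

  #include <linux/cpumask.h>

  /* Mirrors the logic removed below. cpu_online() and cpumask_set_cpu()
   * expect cpu < nr_cpu_ids; after "ethtool -L <ifc> tx N" with a large
   * N, queue_index can pass the num_tx_rings_p_up check while still
   * being >= nr_cpu_ids, so both calls touch bits outside the bitmap.
   */
  static void buggy_set_affinity(struct cpumask *mask, u16 queue_index,
                                 u32 num_tx_rings_p_up)
  {
          if (queue_index < num_tx_rings_p_up &&  /* no cpu bound here */
              cpu_online(queue_index))            /* out-of-range bit read */
                  cpumask_set_cpu(queue_index, mask); /* out-of-range write */
  }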

Signed-off-by: Benjamin Poirier <bpoirier@suse.de>
Acked-by: Ido Shamay <idos@mellanox.com>
Fixes: d03a68f ("net/mlx4_en: Configure the XPS queue mapping on driver load")
Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1783705..f7bf312 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
        ring->queue_index = queue_index;
 
-       if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
-               cpumask_set_cpu(queue_index, &ring->affinity_mask);
+       if (queue_index < priv->num_tx_rings_p_up)
+               cpumask_set_cpu_local_first(queue_index,
+                                           priv->mdev->dev->numa_node,
+                                           &ring->affinity_mask);
 
        *pring = ring;
        return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);
-       if (!user_prio && cpu_online(ring->queue_index))
+       if (!cpumask_empty(&ring->affinity_mask))
                netif_set_xps_queue(priv->dev, &ring->affinity_mask,
                                    ring->queue_index);
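
For reference, a minimal sketch (assuming the 3.19-era lib/cpumask.c
API; the helper name is illustrative, not driver code) of why the
replacement calls are safe for any queue_index:

  #include <linux/cpumask.h>
  #include <linux/netdevice.h>

  /* cpumask_set_cpu_local_first() reduces its index modulo
   * num_online_cpus() and prefers CPUs on the given NUMA node, so it
   * never touches invalid cpumask bits. If the mask was never populated
   * (or the call failed), it stays empty and XPS setup is skipped.
   */
  static void safe_set_xps(struct net_device *dev, int numa_node,
                           u16 queue_index, struct cpumask *mask)
  {
          cpumask_set_cpu_local_first(queue_index, numa_node, mask);

          if (!cpumask_empty(mask))
                  netif_set_xps_queue(dev, mask, queue_index);
  }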