net/mlx4_en: Configure the XPS queue mapping on driver load
author Ido Shamay <idos@mellanox.com>
Thu, 19 Dec 2013 19:20:14 +0000 (21:20 +0200)
committer David S. Miller <davem@davemloft.net>
Fri, 20 Dec 2013 00:04:44 +0000 (19:04 -0500)
Only TX rings of User Priority 0 are mapped.
TX rings of other UPs use the UP 0 mapping.
XPS is not in use when num_tc is set.
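
For illustration only (not part of the patch): a condensed sketch of the
mechanism, where the helper name mlx4_en_setup_xps() is hypothetical and
simply folds together what mlx4_en_create_tx_ring() and
mlx4_en_activate_tx_ring() do in the hunks below.

#include <linux/cpumask.h>
#include <linux/netdevice.h>

/*
 * Pin TX ring 'queue_index' to the CPU of the same index and program the
 * XPS map for it.  Only rings of UP 0 (user_prio == 0) program the map;
 * rings of other UPs reuse the UP 0 mapping, and XPS is not used when
 * num_tc is set.  The queue_index and affinity_mask fields are the ones
 * added to struct mlx4_en_tx_ring by this patch.
 */
static void mlx4_en_setup_xps(struct net_device *dev,
                              struct mlx4_en_tx_ring *ring,
                              int queue_index, int num_tx_rings_p_up,
                              int user_prio)
{
        ring->queue_index = queue_index;

        /* One-CPU affinity mask: queue i -> CPU i, if that CPU is online. */
        if (queue_index < num_tx_rings_p_up && cpu_online(queue_index))
                cpumask_set_cpu(queue_index, &ring->affinity_mask);

        /* Program the XPS map for UP 0 rings only. */
        if (!user_prio && cpu_online(ring->queue_index))
                netif_set_xps_queue(dev, &ring->affinity_mask,
                                    ring->queue_index);
}

The resulting mapping can be inspected at runtime through
/sys/class/net/<dev>/queues/tx-<n>/xps_cpus.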

Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d2e9666..6f92090 100644
@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
                                      prof->tx_ring_size, i, TX, node))
                        goto err;
 
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-                                          prof->tx_ring_size, TXBB_SIZE, node))
+               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+                                          priv->base_tx_qpn + i,
+                                          prof->tx_ring_size, TXBB_SIZE,
+                                          node, i))
                        goto err;
        }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5..5e22d7d 100644
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-                          u16 stride, int node)
+                          u16 stride, int node, int queue_index)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                ring->bf_enabled = true;
 
        ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+       ring->queue_index = queue_index;
+
+       if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+               cpumask_set_cpu(queue_index, &ring->affinity_mask);
 
        *pring = ring;
        return 0;
@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
                               &ring->qp, &ring->qp_state);
+       if (!user_prio && cpu_online(ring->queue_index))
+               netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+                                   ring->queue_index);
 
        return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de..202d8e5 100644
@@ -255,6 +255,8 @@ struct mlx4_en_tx_ring {
        u16 poll_cnt;
        struct mlx4_en_tx_info *tx_info;
        u8 *bounce_buf;
+       u8 queue_index;
+       cpumask_t affinity_mask;
        u32 last_nr_txbb;
        struct mlx4_qp qp;
        struct mlx4_qp_context context;
@@ -719,7 +721,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring,
-                          int qpn, u32 size, u16 stride, int node);
+                          int qpn, u32 size, u16 stride,
+                          int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,