net/mlx5e: Use only close NUMA node for default RSS
author Tariq Toukan <tariqt@mellanox.com>
Wed, 20 Apr 2016 19:02:11 +0000 (22:02 +0300)
committer David S. Miller <davem@davemloft.net>
Thu, 21 Apr 2016 19:09:04 +0000 (15:09 -0400)
Distribute the default RSS indirection table uniformly over the rings of the
close NUMA node, instead of over all available channels.
This way we enforce a preference for close rings over far ones.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
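
For illustration only, below is a minimal standalone userspace sketch of the mapping this patch produces. It is not the kernel code: the numa_node lookup and the cpumask helpers are replaced by plain parameters, and the channel/core counts used here are hypothetical.

/*
 * Userspace sketch of the default indirection-table fill, assuming a
 * 128-entry table (MLX5E_INDIR_RQT_SIZE) and hypothetical core counts.
 */
#include <stdio.h>

#define INDIR_RQT_SIZE 128

static void build_default_indir_rqt(unsigned int *indirection_rqt, int len,
				    int num_channels, int node_num_of_cores)
{
	int i;

	/* Prefer close rings: never spread the table over more channels
	 * than the close NUMA node has cores.
	 */
	if (node_num_of_cores && node_num_of_cores < num_channels)
		num_channels = node_num_of_cores;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

int main(void)
{
	unsigned int rqt[INDIR_RQT_SIZE];
	int i;

	/* Hypothetical setup: 16 channels, 8 cores on the close node. */
	build_default_indir_rqt(rqt, INDIR_RQT_SIZE, 16, 8);

	for (i = 0; i < 16; i++)
		printf("%u ", rqt[i]);	/* prints 0..7 0..7 */
	printf("\n");

	return 0;
}

With 16 requested channels but only 8 cores on the close node, the table cycles over rings 0 through 7 only, so far rings never appear in the default RSS spread.
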
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c4ddbe8501a7ed0d5fda8f3d5e9beb5749ed47cb..7f19644689f2cc34677f53c5b228ca6c0e9b79c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -671,7 +671,8 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+                                  u32 *indirection_rqt, int len,
                                   int num_channels);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 39c19021d154a4ffd20add078ff3522c5083d282..6f40ba448f07b1bcba662adeae2f8f4d1ebd62da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -397,7 +397,7 @@ static int mlx5e_set_channels(struct net_device *dev,
                mlx5e_close_locked(dev);
 
        priv->params.num_channels = count;
-       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+       mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, count);
 
        if (was_opened)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7fbe1ba86294d78e5cf3168afcdfe3564fe4b634..9b58ef6cab9336aa0f78db0daf799b4714b38b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2297,11 +2297,22 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 }
 #endif
 
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+                                  u32 *indirection_rqt, int len,
                                   int num_channels)
 {
+       int node = mdev->priv.numa_node;
+       int node_num_of_cores;
        int i;
 
+       if (node == -1)
+               node = first_online_node;
+
+       node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+       if (node_num_of_cores)
+               num_channels = min_t(int, num_channels, node_num_of_cores);
+
        for (i = 0; i < len; i++)
                indirection_rqt[i] = i % num_channels;
 }
@@ -2333,7 +2344,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));
 
-       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+       mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, num_channels);
 
        priv->params.lro_wqe_sz            =