return err;
}
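+/* Report to ethtool the maximum supported channel counts and the ones
+ * currently in use. TX channels are counted per user priority (UP),
+ * hence tx_ring_num is divided by MLX4_EN_NUM_UP.
+ */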
+static void mlx4_en_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RX_RINGS;
+ channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+
+ channel->rx_count = priv->rx_ring_num;
+ channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+}
+
+static int mlx4_en_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+
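+ /* Only independent RX and TX channel counts within the ring limits
+ * are supported; combined and "other" channels are rejected.
+ */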
+ if (channel->other_count || channel->combined_count ||
+ channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
+ channel->rx_count > MAX_RX_RINGS ||
+ !channel->tx_count || !channel->rx_count)
+ return -EINVAL;
+
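+ /* The port must be down while its rings are reallocated */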
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
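+ /* Release the current rings and CQs before applying the new counts */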
+ mlx4_en_free_resources(priv);
+
+ priv->num_tx_rings_p_up = channel->tx_count;
+ priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ priv->rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+
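+ /* Publish the new queue counts to the networking stack */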
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+ mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+
+ en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+ en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+ err = mlx4_en_moderation_update(priv);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
+ .get_channels = mlx4_en_get_channels,
+ .set_channels = mlx4_en_set_channels,
};
rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
min_t(int,
dev->caps.num_comp_vectors,
- MAX_RX_RINGS)));
+ DEF_RX_RINGS)));
} else {
mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
min_t(int, dev->caps.comp_pool/
#include "mlx4_en.h"
#include "en_port.h"
-static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
- unsigned int q, offset = 0;
+ unsigned int offset = 0;
if (up && up != MLX4_EN_NUM_UP)
return -EINVAL;
netdev_set_num_tc(dev, up);
/* Partition Tx queues evenly amongst UP's */
- q = priv->tx_ring_num / up;
for (i = 0; i < up; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
+ netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
+ offset += priv->num_tx_rings_p_up;
}
return 0;
/* Configure ring */
tx_ring = &priv->tx_ring[i];
err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
- i / priv->mdev->profile.num_tx_rings_p_up);
+ i / priv->num_tx_rings_p_up);
if (err) {
en_err(priv, "Failed allocating Tx ring\n");
mlx4_en_deactivate_cq(priv, cq);
int err;
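+ /* Allocate the netdev with the maximum number of RX/TX queues, so a
+ * later ethtool set_channels can grow the ring counts without
+ * reallocating the net_device.
+ */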
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
- prof->tx_ring_num, prof->rx_ring_num);
+ MAX_TX_RINGS, MAX_RX_RINGS);
if (dev == NULL)
return -ENOMEM;
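+ /* Start out with the profile's default queue counts */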
+ netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
+
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
dev->dev_id = port - 1;
priv->flags = prof->flags;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
+ priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
- priv->tx_ring_num, GFP_KERNEL);
+
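+ /* Size the ring and CQ arrays for the maximum ring count, not the
+ * current one, so set_channels never has to reallocate them.
+ */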
+ priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+ GFP_KERNEL);
if (!priv->tx_ring) {
err = -ENOMEM;
goto out;
}
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
- GFP_KERNEL);
+ priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+ GFP_KERNEL);
if (!priv->tx_cq) {
err = -ENOMEM;
goto out;
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+ u16 rings_p_up = priv->num_tx_rings_p_up;
u8 up = 0;
if (dev->num_tc)
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_RX_RINGS 16
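+/* Default number of RX rings, and the ceiling ethtool may set */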
+#define DEF_RX_RINGS 16
+#define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
#define MLX4_EN_NUM_UP 8
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
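+/* Upper bound on TX rings: the per-UP maximum times the number of UPs */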
+#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
+ MLX4_EN_NUM_UP)
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
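+ /* Number of TX rings per user priority (UP), settable via ethtool */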
+ u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_skb_size;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
#endif
+int mlx4_en_setup_tc(struct net_device *dev, u8 up);
+
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *rx_ring);