return err;
}
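+/*
+ * mlx5e_set_sq_maxrate() is defined further down in this file; declare it
+ * here so mlx5e_open_txqsq() can use it to restore a configured tx rate.
+ */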
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_txqsq *sq, u32 rate);
+
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
 struct mlx5e_txqsq *sq)
{
struct mlx5e_create_sq_param csp = {};
struct mlx5e_priv *priv = c->priv;
+ u32 tx_rate;
+ int txq_ix;
int err;
err = mlx5e_alloc_txqsq(c, tc, param, sq);
if (err)
 return err;
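+ /* Restore any rate limit previously configured for this txq
+  * (txq_ix mirrors channeltc_to_txq_map[ix][tc] = ix + tc * num_channels).
+  */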
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ tx_rate = priv->tx_rates[txq_ix];
+ if (tx_rate)
+ mlx5e_set_sq_maxrate(priv->netdev, sq, tx_rate);
+
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
return 0;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
int err;
- int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
if (err)
goto err_close_icosq;
- for (i = 0; i < priv->params.num_tc; i++) {
- u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
-
- if (priv->tx_rates[txq_ix]) {
- struct mlx5e_txqsq *sq = priv->txq_to_sq_map[txq_ix];
-
- mlx5e_set_sq_maxrate(priv->netdev, sq,
- priv->tx_rates[txq_ix]);
- }
- }
-
err = c->xdp ? mlx5e_open_xdpsq(c, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
if (err)
goto err_close_sqs;