dev->counters[qp->port - 1];
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index =
+ MLX4_SINK_COUNTER_INDEX(dev->dev);
if (qp->flags & MLX4_IB_QP_NETIF) {
mlx4_ib_steer_qp_reg(dev, qp, 1);
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
}
}
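Both call sites above replace the hard-coded 0xff with the new sink counter index, so a QP that gets no per-port counter is still given a valid counter index. A minimal sketch of that selection logic, for illustration only: pick_qp_counter_index() is a hypothetical helper, not driver code, and it assumes a counters[] array that holds -1 when no counter was allocated for a port.

	/* Illustrative sketch (hypothetical helper, not part of the mlx4 driver).
	 * Assumes struct mlx4_dev and the MLX4_SINK_COUNTER_INDEX() macro
	 * added by this patch are visible.
	 */
	static u8 pick_qp_counter_index(struct mlx4_dev *dev, int port,
					const int *counters)
	{
		if (counters[port - 1] != -1)
			return counters[port - 1];	/* per-port counter exists: use it */

		/* no counter for this port: fall back to the reserved sink index */
		return MLX4_SINK_COUNTER_INDEX(dev);
	}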
- dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+ dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
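The change above stops rounding the reported counter count down to a power of two, so no hardware counters are silently dropped; the power-of-two constraint of the bitmap allocator is handled in mlx4_init_counters_table() below instead. Worked numbers, assuming a hypothetical device that reports 127 counters:

	/* Illustrative arithmetic only (hypothetical max_counters = 127):
	 *   old: 1 << ilog2(127) = 64   -> 63 counters were unusable
	 *   new: max_counters    = 127  -> all counters visible to the driver
	 */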
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int nent;
+ int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
- nent = dev->caps.max_counters;
- return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+ nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
+ /* reserve last counter index for sink counter */
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
+ nent_pow2 - 1, 0,
+ nent_pow2 - dev->caps.max_counters + 1);
}
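A worked example of the sizing in mlx4_init_counters_table(), again with a hypothetical 127 counters: the allocator table is rounded up to a power of two, and the padding created by that rounding plus the last real counter (the sink) are reserved at the top so they are never handed out.

	/* Illustrative sketch only, hypothetical numbers:
	 *   max_counters = 127
	 *   nent_pow2    = roundup_pow_of_two(127) = 128
	 *   reserved_top = 128 - 127 + 1           = 2
	 *
	 * Index 127 is padding from the rounding and does not exist in hardware,
	 * index 126 is MLX4_SINK_COUNTER_INDEX (max_counters - 1), and indices
	 * 0..125 remain available to mlx4_counter_alloc().
	 */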
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
#define MLX4_INVALID_SLAVE_ID 0xFF
+#define MLX4_SINK_COUNTER_INDEX(dev) ((dev)->caps.max_counters - 1)
void handle_port_mgmt_change_event(struct work_struct *work);