unregister_netdevice_notifier(&dev->roce.nb);
}
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+        unsigned int i;
+
+        for (i = 0; i < dev->num_ports; i++)
+                mlx5_core_dealloc_q_counter(dev->mdev,
+                                            dev->port[i].q_cnt_id);
+}
+
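+/* Allocate one Q counter set per IB port; the returned ids are kept in dev->port[] */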
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+        int i;
+        int ret;
+
+        for (i = 0; i < dev->num_ports; i++) {
+                ret = mlx5_core_alloc_q_counter(dev->mdev,
+                                                &dev->port[i].q_cnt_id);
+                if (ret) {
+                        mlx5_ib_warn(dev,
+                                     "couldn't allocate queue counter for port %d, err %d\n",
+                                     i + 1, ret);
+                        goto dealloc_counters;
+                }
+        }
+
+        return 0;
+
+dealloc_counters:
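+        /* free only the counters that were allocated before the failure */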
+        while (--i >= 0)
+                mlx5_core_dealloc_q_counter(dev->mdev,
+                                            dev->port[i].q_cnt_id);
+
+        return ret;
+}
+
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
dev->mdev = mdev;
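+        /* per-port state (Q counter ids), one entry per IB port */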
+        dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+                            GFP_KERNEL);
+        if (!dev->port)
+                goto err_dealloc;
+
rwlock_init(&dev->roce.netdev_lock);
err = get_port_caps(dev);
if (err)
-                goto err_dealloc;
+                goto err_free_port;
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
if (err)
goto err_rsrc;
-        err = ib_register_device(&dev->ib_dev, NULL);
+        err = mlx5_ib_alloc_q_counters(dev);
if (err)
goto err_odp;
+        err = ib_register_device(&dev->ib_dev, NULL);
+        if (err)
+                goto err_q_cnt;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_q_cnt:
+        mlx5_ib_dealloc_q_counters(dev);
+
err_odp:
mlx5_ib_odp_remove_one(dev);
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+err_free_port:
+        kfree(dev->port);
+
err_dealloc:
ib_dealloc_device((struct ib_device *)dev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
ib_unregister_device(&dev->ib_dev);
+        mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+        kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}
struct mutex mutex;
};
+struct mlx5_ib_port {
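+        /* Q counter set id allocated for this port */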
+        u16 q_cnt_id;
+};
+
struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
* netdev pointer
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+        /* Array with num_ports elements */
+        struct mlx5_ib_port *port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
else
sqd_event = 0;
+        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+                u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+                               qp->port) - 1;
+                struct mlx5_ib_port *mibport = &dev->port[port_num];
+
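+                /* program the port's Q counter set id into the QP context */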
+                context->qp_counter_set_usr_page |=
+                        cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
+        }
+
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->sq_crq_size |= cpu_to_be16(1 << 4);