IB/mlx5: Add per port counters
author Mark Bloch <markb@mellanox.com>
Fri, 17 Jun 2016 12:10:55 +0000 (15:10 +0300)
committer Doug Ledford <dledford@redhat.com>
Thu, 23 Jun 2016 15:39:14 +0000 (11:39 -0400)
In order to support per-port statistics, attach each QP to a counter
set dedicated to its port.

Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
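For context, the counter ids allocated in this patch are meant to be read
back later (for example by a hw_stats hook in a follow-up change). A minimal
sketch of such a reader, assuming the mlx5_core_query_q_counter() helper and
the MLX5_ST_SZ_BYTES()/MLX5_GET() accessors for the query_q_counter_out
layout exported by the mlx5 core at the time; the wrapper name below is
hypothetical:

	/* Hypothetical helper: read one Q counter field for an IB port.
	 * port_num is 1-based, dev->port[] is 0-based.
	 */
	static int mlx5_ib_read_port_oob(struct mlx5_ib_dev *dev, u8 port_num,
					 u32 *out_of_buffer)
	{
		int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
		void *out;
		int err;

		out = kzalloc(outlen, GFP_KERNEL);
		if (!out)
			return -ENOMEM;

		/* reset == 0: read without clearing the counter */
		err = mlx5_core_query_q_counter(dev->mdev,
						dev->port[port_num - 1].q_cnt_id,
						0, out, outlen);
		if (!err)
			*out_of_buffer = MLX5_GET(query_q_counter_out, out,
						  out_of_buffer);

		kfree(out);
		return err;
	}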

drivers/infiniband/hw/mlx5/main.c
index a7cc6d74d3b0ea6ce71629c8c12258446211d3b3..b29b84134c4cc83a9ee9cd2c6e83cd3e469b1d45 100644 (file)
@@ -2541,6 +2541,41 @@ static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
        unregister_netdevice_notifier(&dev->roce.nb);
 }
 
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+       unsigned int i;
+
+       for (i = 0; i < dev->num_ports; i++)
+               mlx5_core_dealloc_q_counter(dev->mdev,
+                                           dev->port[i].q_cnt_id);
+}
+
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < dev->num_ports; i++) {
+               ret = mlx5_core_alloc_q_counter(dev->mdev,
+                                               &dev->port[i].q_cnt_id);
+               if (ret) {
+                       mlx5_ib_warn(dev,
+                                    "couldn't allocate queue counter for port %d, err %d\n",
+                                    i + 1, ret);
+                       goto dealloc_counters;
+               }
+       }
+
+       return 0;
+
+dealloc_counters:
+       while (--i >= 0)
+               mlx5_core_dealloc_q_counter(dev->mdev,
+                                           dev->port[i].q_cnt_id);
+
+       return ret;
+}
+
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
        struct mlx5_ib_dev *dev;
@@ -2563,10 +2598,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
        dev->mdev = mdev;
 
+       dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+                           GFP_KERNEL);
+       if (!dev->port)
+               goto err_dealloc;
+
        rwlock_init(&dev->roce.netdev_lock);
        err = get_port_caps(dev);
        if (err)
-               goto err_dealloc;
+               goto err_free_port;
 
        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);
@@ -2729,10 +2769,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        if (err)
                goto err_rsrc;
 
-       err = ib_register_device(&dev->ib_dev, NULL);
+       err = mlx5_ib_alloc_q_counters(dev);
        if (err)
                goto err_odp;
 
+       err = ib_register_device(&dev->ib_dev, NULL);
+       if (err)
+               goto err_q_cnt;
+
        err = create_umr_res(dev);
        if (err)
                goto err_dev;
@@ -2754,6 +2798,9 @@ err_umrc:
 err_dev:
        ib_unregister_device(&dev->ib_dev);
 
+err_q_cnt:
+       mlx5_ib_dealloc_q_counters(dev);
+
 err_odp:
        mlx5_ib_odp_remove_one(dev);
 
@@ -2764,6 +2811,9 @@ err_disable_roce:
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);
 
+err_free_port:
+       kfree(dev->port);
+
 err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);
 
@@ -2776,11 +2826,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
        ib_unregister_device(&dev->ib_dev);
+       mlx5_ib_dealloc_q_counters(dev);
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);
+       kfree(dev->port);
        ib_dealloc_device(&dev->ib_dev);
 }
 
drivers/infiniband/hw/mlx5/mlx5_ib.h
index 0001ed51e1bd889f6abfa7eae660978774b29223..372385d0f99384785123ba5e24fcb2ceddcc520e 100644 (file)
@@ -591,6 +591,10 @@ struct mlx5_ib_resources {
        struct mutex    mutex;
 };
 
+struct mlx5_ib_port {
+       u16 q_cnt_id;
+};
+
 struct mlx5_roce {
        /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
         * netdev pointer
@@ -629,6 +633,8 @@ struct mlx5_ib_dev {
        /* protect resources needed as part of reset flow */
        spinlock_t              reset_flow_resource_lock;
        struct list_head        qp_list;
+       /* Array with num_ports elements */
+       struct mlx5_ib_port     *port;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
drivers/infiniband/hw/mlx5/qp.c
index 9004905d3d931d4d42344a35f4592ecf025ca4f0..5ca14a22868f6fa44e6842282871516add48a35e 100644 (file)
@@ -2651,6 +2651,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        else
                sqd_event = 0;
 
+       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+               u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+                              qp->port) - 1;
+               struct mlx5_ib_port *mibport = &dev->port[port_num];
+
+               context->qp_counter_set_usr_page |=
+                       cpu_to_be32(mibport->q_cnt_id << 16);
+       }
+
        if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                context->sq_crq_size |= cpu_to_be16(1 << 4);
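The RESET->INIT hunk above binds the QP to its port's counter set; the port
index comes from attr->port_num when IB_QP_PORT is being modified and from
the previously stored qp->port otherwise, minus one because dev->port[] is
zero-based. A standalone sketch of that selection, with a hypothetical
helper name:

	/* Hypothetical helper: pick the counter set id for a QP moving from
	 * RESET to INIT. IB port numbers are 1-based, dev->port[] is 0-based.
	 */
	static u16 mlx5_ib_qp_cnt_id(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_qp *qp,
				     const struct ib_qp_attr *attr,
				     int attr_mask)
	{
		u8 port_num = (attr_mask & IB_QP_PORT) ? attr->port_num
						       : qp->port;

		return dev->port[port_num - 1].q_cnt_id;
	}

The chosen id is then shifted into the qp_counter_set_usr_page word of the
QP context, as done in the hunk above.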