{net, ib}/mlx5: Make cache line size determination at runtime.
author Daniel Jurgens <danielj@mellanox.com>
Tue, 25 Oct 2016 15:36:24 +0000 (18:36 +0300)
committer David S. Miller <davem@davemloft.net>
Sat, 29 Oct 2016 16:00:39 +0000 (12:00 -0400)
ARM 64B cache line systems have L1_CACHE_BYTES set to 128 at compile
time, so sizing doorbell records with it packs half as many records
into each page as the hardware allows and reports the wrong cache line
size to user space. cache_line_size() returns the correct size at
runtime.

Fixes: cf50b5efa2fe ("net/mlx5_core/ib: New device capabilities handling")
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
include/linux/mlx5/driver.h

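Background for the diffs below: doorbell records are packed one per
cache line into a DMA-coherent page, and the count per page used to be
the compile-time constant MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES;
the patch computes it at runtime instead. The following userspace sketch
only demonstrates the arithmetic; sysconf() stands in for the kernel's
cache_line_size(), and the hardcoded 128 is the arm64 compile-time
L1_CACHE_BYTES.

/*
 * Sketch only, not kernel code: sysconf() approximates the kernel's
 * cache_line_size(); 128 is the arm64 compile-time L1_CACHE_BYTES.
 */
#include <stdio.h>
#include <unistd.h>

#define L1_CACHE_BYTES 128	/* arm64 compile-time worst case */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	long line = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);

	if (line <= 0)
		line = 64;	/* sysconf may not know; assume 64B */

	printf("compile-time sizing: %ld doorbells/page\n",
	       page / L1_CACHE_BYTES);
	printf("runtime sizing:      %ld doorbells/page\n", page / line);
	return 0;
}

On a 4K-page system with 64B cache lines this prints 32 versus 64: the
compile-time constant wastes half of every doorbell page.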
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8c392709936b0eb225d3e6768d2c4..63036c7316264fec532c286a33373743cf163224 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
                resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
-       resp.cache_line_size = L1_CACHE_BYTES;
+       resp.cache_line_size = cache_line_size();
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
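
The hunk above changes the cache line size the kernel reports to the
userspace provider in the alloc_ucontext response. A reported cache
line size is typically consumed for alignment and striding math on the
userspace side; the sketch below shows only that generic technique,
with resp_cache_line_size standing in for the real response field
(illustrative, not the driver's API).

/* Illustrative only: resp_cache_line_size stands in for the value a
 * provider library would receive; align_up() is the generic technique. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t align_up(uintptr_t x, uintptr_t a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	unsigned int resp_cache_line_size = 64;	/* was 128 via L1_CACHE_BYTES */
	uintptr_t addr = 0x1014;		/* some unaligned address */

	printf("0x%lx\n", (unsigned long)align_up(addr, resp_cache_line_size));
	return 0;
}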
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd6264a05c38d9c0cd2ce7d807bccc..7ce97daf26c62f7de7bb0fb8693024ed5b371289 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
 
 enum {
        MLX5_IB_SQ_STRIDE       = 6,
-       MLX5_IB_CACHE_LINE_SIZE = 64,
 };
 
 static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f6e5618edfea860a8c8d5f49e5c54..2c6e3c7b7417943b643f21cd3e7d25ebd0061d9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
 
 #include "mlx5_core.h"
 
+struct mlx5_db_pgdir {
+       struct list_head        list;
+       unsigned long          *bitmap;
+       __be32                 *db_page;
+       dma_addr_t              db_dma;
+};
+
 /* Handling for queue buffers -- we allocate a bunch of memory and
  * register it in a memory region at HCA virtual address 0.
  */
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                 int node)
 {
+       u32 db_per_page = PAGE_SIZE / cache_line_size();
        struct mlx5_db_pgdir *pgdir;
 
        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;
 
-       bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+       pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+                               sizeof(unsigned long),
+                               GFP_KERNEL);
+
+       if (!pgdir->bitmap) {
+               kfree(pgdir);
+               return NULL;
+       }
+
+       bitmap_fill(pgdir->bitmap, db_per_page);
 
        pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                                                       &pgdir->db_dma, node);
        if (!pgdir->db_page) {
+               kfree(pgdir->bitmap);
                kfree(pgdir);
                return NULL;
        }
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
 {
+       u32 db_per_page = PAGE_SIZE / cache_line_size();
        int offset;
        int i;
 
-       i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
-       if (i >= MLX5_DB_PER_PAGE)
+       i = find_first_bit(pgdir->bitmap, db_per_page);
+       if (i >= db_per_page)
                return -ENOMEM;
 
        __clear_bit(i, pgdir->bitmap);
 
        db->u.pgdir = pgdir;
        db->index   = i;
-       offset = db->index * L1_CACHE_BYTES;
+       offset = db->index * cache_line_size();
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;
 
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 {
+       u32 db_per_page = PAGE_SIZE / cache_line_size();
        mutex_lock(&dev->priv.pgdir_mutex);
 
        __set_bit(db->index, db->u.pgdir->bitmap);
 
-       if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+       if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
+               kfree(db->u.pgdir->bitmap);
                kfree(db->u.pgdir);
        }
 
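With the slot count only known at runtime, the pgdir's bitmap can no
longer be an inline DECLARE_BITMAP() member and is heap-allocated,
which adds the two cleanup obligations visible in the hunks above:
free the bitmap on the db_page allocation error path, and free it
again when the last doorbell in the page is released. Below is a
self-contained userspace analogue of that lifecycle, with hypothetical
names (pgdir_alloc, pgdir_get_slot).

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

struct pgdir {
	unsigned long *bitmap;	/* one bit per doorbell slot, 1 = free */
	unsigned int nslots;	/* PAGE_SIZE / cache line size, at runtime */
};

static struct pgdir *pgdir_alloc(unsigned int nslots)
{
	unsigned int nlongs = (nslots + BITS_PER_LONG - 1) / BITS_PER_LONG;
	struct pgdir *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->bitmap = calloc(nlongs, sizeof(unsigned long));
	if (!p->bitmap) {
		free(p);	/* mirror the kernel's error unwind */
		return NULL;
	}
	memset(p->bitmap, 0xff, nlongs * sizeof(unsigned long)); /* all free */
	p->nslots = nslots;
	return p;
}

static int pgdir_get_slot(struct pgdir *p)
{
	for (unsigned int i = 0; i < p->nslots; i++) {
		unsigned long *word = &p->bitmap[i / BITS_PER_LONG];
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (*word & bit) {
			*word &= ~bit;	/* claim the slot */
			return (int)i;
		}
	}
	return -1;	/* exhausted: caller allocates another pgdir */
}

int main(void)
{
	struct pgdir *p = pgdir_alloc(4096 / 64);	/* PAGE_SIZE / line */

	if (!p)
		return 1;
	printf("first free slot: %d\n", pgdir_get_slot(p));
	free(p->bitmap);	/* bitmap freed separately, as in the patch */
	free(p);
	return 0;
}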
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e49686f5adedeb962e67dfbc375616..5dbda60a09f4f7eea36637a3168857c355f8c7e0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -625,10 +625,6 @@ struct mlx5_db {
        int                     index;
 };
 
-enum {
-       MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
 enum {
        MLX5_COMP_EQ_SIZE = 1024,
 };
@@ -638,13 +634,6 @@ enum {
        MLX5_PTYS_EN = 1 << 2,
 };
 
-struct mlx5_db_pgdir {
-       struct list_head        list;
-       DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
-       __be32                 *db_page;
-       dma_addr_t              db_dma;
-};
-
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
 struct mlx5_cmd_work_ent {
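
The driver.h hunks are the standard opaque-type move: DECLARE_BITMAP()
needs a compile-time size, so once the slot count became dynamic the
struct definition migrated into alloc.c, and the rest of the driver
keeps using struct mlx5_db_pgdir only through a pointer (an incomplete
type, as in the mlx5_db union left in the header). A minimal sketch of
that pattern, with hypothetical names:

/* widget.h side: callers see only an incomplete type */
struct widget;
struct widget *widget_new(unsigned int nslots);
void widget_free(struct widget *w);

/* widget.c side: the only translation unit that knows the layout */
#include <stdlib.h>

struct widget {
	unsigned long *bitmap;	/* runtime-sized, like the pgdir bitmap */
	unsigned int nslots;
};

struct widget *widget_new(unsigned int nslots)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (w)
		w->nslots = nslots;
	return w;
}

void widget_free(struct widget *w)
{
	if (w)
		free(w->bitmap);
	free(w);
}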