IB: Replace ib_umem page_size by page_shift
author     Artemy Kovalyov <artemyko@mellanox.com>
           Wed, 5 Apr 2017 06:23:50 +0000 (09:23 +0300)
committer  Doug Ledford <dledford@redhat.com>
           Tue, 25 Apr 2017 19:40:28 +0000 (15:40 -0400)
The size of the pages is held by struct ib_umem in the page_size field.

It is better to store it as an exponent, because the page size is by
nature always a power of two and is used as a factor, a divisor, or an
argument to ilog2().
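
Concretely, all three uses reduce to a field read or a plain shift;
representative before/after pairs from this patch:

  ilog2(umem->page_size)      ->  umem->page_shift
  ffs(umem->page_size) - 1    ->  umem->page_shift
  umem->page_size * k         ->  k << umem->page_shift
  (u64)umem->page_size        ->  BIT(umem->page_shift)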

Converting page_size into page_shift makes the code portable and avoids
the following error when compiling on ARM:

  ERROR: "__aeabi_uldivmod" [drivers/infiniband/core/ib_core.ko] undefined!
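
As a minimal illustration (hypothetical helper names, not part of this
patch): on 32-bit ARM, gcc lowers a 64-bit division by a run-time page
size to libgcc's __aeabi_uldivmod, which the kernel does not provide,
while the shift form needs no helper at all:

  #include <linux/types.h>

  /* Division: on 32-bit ARM this emits a call to __aeabi_uldivmod,
   * which is undefined in the kernel, hence the link error above. */
  static u64 umem_num_pages_div(u64 length, unsigned int page_size)
  {
          return length / page_size;
  }

  /* Shift by the exponent: a single instruction, no libgcc helper. */
  static u64 umem_num_pages_shift(u64 length, unsigned int page_shift)
  {
          return length >> page_shift;
  }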

CC: Selvin Xavier <selvin.xavier@broadcom.com>
CC: Steve Wise <swise@chelsio.com>
CC: Lijun Ou <oulijun@huawei.com>
CC: Shiraz Saleem <shiraz.saleem@intel.com>
CC: Adit Ranadive <aditr@vmware.com>
CC: Dennis Dalessandro <dennis.dalessandro@intel.com>
CC: Ram Amrani <Ram.Amrani@Cavium.com>
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Acked-by: Ram Amrani <Ram.Amrani@cavium.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
23 files changed:
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hns/hns_roce_cq.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/infiniband/sw/rxe/rxe_mr.c
include/rdma/ib_umem.h

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 27f155d2df8da679bb713d88fc80b1a05568ff41..6b87c051ffd41af5a48c9c44d6863b21795d1c50 100644
@@ -115,11 +115,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (!umem)
                return ERR_PTR(-ENOMEM);
 
-       umem->context   = context;
-       umem->length    = size;
-       umem->address   = addr;
-       umem->page_size = PAGE_SIZE;
-       umem->pid       = get_task_pid(current, PIDTYPE_PID);
+       umem->context    = context;
+       umem->length     = size;
+       umem->address    = addr;
+       umem->page_shift = PAGE_SHIFT;
+       umem->pid        = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any of the following
         * access flags are set.  "Local write" and "remote write"
@@ -315,7 +315,6 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-       int shift;
        int i;
        int n;
        struct scatterlist *sg;
@@ -323,11 +322,9 @@ int ib_umem_page_count(struct ib_umem *umem)
        if (umem->odp_data)
                return ib_umem_num_pages(umem);
 
-       shift = ilog2(umem->page_size);
-
        n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-               n += sg_dma_len(sg) >> shift;
+               n += sg_dma_len(sg) >> umem->page_shift;
 
        return n;
 }
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index cb2742b548bbed85241fd4ed45c25235484e1d7b..8ee30163497d4c2753e88f10172ff97f583fcb54 100644
@@ -254,11 +254,11 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
        if (!umem)
                return ERR_PTR(-ENOMEM);
 
-       umem->context   = context;
-       umem->length    = size;
-       umem->address   = addr;
-       umem->page_size = PAGE_SIZE;
-       umem->writable  = 1;
+       umem->context    = context;
+       umem->length     = size;
+       umem->address    = addr;
+       umem->page_shift = PAGE_SHIFT;
+       umem->writable   = 1;
 
        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data) {
@@ -707,7 +707,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
-       for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+       for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 17741146968640afc7a38ccf7f7d4ed241e2c046..a4e8e0b075d214947bdb8f526faf741428419b37 100644
@@ -3016,7 +3016,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
        struct bnxt_re_mr *mr;
        struct ib_umem *umem;
        u64 *pbl_tbl, *pbl_tbl_orig;
-       int i, umem_pgs, pages, page_shift, rc;
+       int i, umem_pgs, pages, rc;
        struct scatterlist *sg;
        int entry;
 
@@ -3062,22 +3062,22 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
        }
        pbl_tbl_orig = pbl_tbl;
 
-       page_shift = ilog2(umem->page_size);
        if (umem->hugetlb) {
                dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
                rc = -EFAULT;
                goto fail;
        }
-       if (umem->page_size != PAGE_SIZE) {
-               dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+
+       if (umem->page_shift != PAGE_SHIFT) {
+               dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
                rc = -EFAULT;
                goto fail;
        }
        /* Map umem buf ptrs to the PBL */
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               pages = sg_dma_len(sg) >> page_shift;
+               pages = sg_dma_len(sg) >> umem->page_shift;
                for (i = 0; i < pages; i++, pbl_tbl++)
-                       *pbl_tbl = sg_dma_address(sg) + (i << page_shift);
+                       *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
        }
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
                               umem_pgs, false);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 8f4b408c8bb0502cd26570305d2cdaf5476c6525..790d7c79fe3e4a6adb5b1dcad27c73058313acc6 100644
@@ -581,7 +581,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_PTR(err);
        }
 
-       shift = ffs(mhp->umem->page_size) - 1;
+       shift = mhp->umem->page_shift;
 
        n = mhp->umem->nmap;
 
@@ -601,7 +601,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                        len = sg_dma_len(sg) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-                                       mhp->umem->page_size * k);
+                                                        (k << shift));
                                if (i == PAGE_SIZE / sizeof *pages) {
                                        err = iwch_write_pbl(mhp, pages, i, n);
                                        if (err)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 19dc548e1b73baa4e37f1382641ee23c30482102..3ee7f43e419ae9ff7d2dc33ff9f5776bfd01c910 100644
@@ -515,7 +515,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_PTR(err);
        }
 
-       shift = ffs(mhp->umem->page_size) - 1;
+       shift = mhp->umem->page_shift;
 
        n = mhp->umem->nmap;
        err = alloc_pbl(mhp, n);
@@ -534,7 +534,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-                               mhp->umem->page_size * k);
+                                                (k << shift));
                        if (i == PAGE_SIZE / sizeof *pages) {
                                err = write_pbl(&mhp->rhp->rdev,
                                      pages,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 589496c8fb9ec90cc6d8de087a342533206ca99a..b89fd711019e2b0d90c90a11d87a0a1df6299606 100644
@@ -219,8 +219,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
                return PTR_ERR(*umem);
 
        ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-                               ilog2((unsigned int)(*umem)->page_size),
-                               &buf->hr_mtt);
+                               (*umem)->page_shift, &buf->hr_mtt);
        if (ret)
                goto err_buf;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index b48693510727105d8bfb42585906e7a616ef0831..dc5c97c8f070c9c02ac79fb8d51211df5271ee3b 100644
@@ -504,7 +504,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
-                       pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+                       pages[i++] = sg_dma_address(sg) +
+                               (k << umem->page_shift);
                        if (i == PAGE_SIZE / sizeof(u64)) {
                                ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
                                                         pages);
@@ -564,9 +565,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        n = ib_umem_page_count(mr->umem);
-       if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
-               dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
-                       mr->umem->page_size);
+       if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
+               dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
+                       BIT(mr->umem->page_shift));
                ret = -EINVAL;
                goto err_umem;
        }
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 3f44f2f91f03d84d51084079662374910409e94b..054c52699090dfa09fba494e810094111b0bc7f4 100644
@@ -437,8 +437,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                }
 
                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
-                                   ilog2((unsigned int)hr_qp->umem->page_size),
-                                   &hr_qp->mtt);
+                                       hr_qp->umem->page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 9b2849979756ba05ebf2c1f25073c1740d4b1520..378c75759be48e039b8ec8a4febaa2fb4ac26b9b 100644
@@ -1345,7 +1345,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 {
        struct ib_umem *region = iwmr->region;
        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-       int chunk_pages, entry, pg_shift, i;
+       int chunk_pages, entry, i;
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct i40iw_pble_info *pinfo;
        struct scatterlist *sg;
@@ -1354,14 +1354,14 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 
        pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-       pg_shift = ffs(region->page_size) - 1;
        for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-               chunk_pages = sg_dma_len(sg) >> pg_shift;
+               chunk_pages = sg_dma_len(sg) >> region->page_shift;
                if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
                    !iwpbl->qp_mr.sq_page)
                        iwpbl->qp_mr.sq_page = sg_page(sg);
                for (i = 0; i < chunk_pages; i++) {
-                       pg_addr = sg_dma_address(sg) + region->page_size * i;
+                       pg_addr = sg_dma_address(sg) +
+                               (i << region->page_shift);
 
                        if ((entry + i) == 0)
                                *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
@@ -1847,7 +1847,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
        iwmr->ibmr.device = pd->device;
        ucontext = to_ucontext(pd->uobject->context);
 
-       iwmr->page_size = region->page_size;
+       iwmr->page_size = PAGE_SIZE;
        iwmr->page_msk = PAGE_MASK;
 
        if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 6a0fec357daecdd1e049690af1495fc333b8c9bc..4f5a143fc0a7229a703d0ba99a6fdd1cddb00313 100644
@@ -147,7 +147,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
                return PTR_ERR(*umem);
 
        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-                           ilog2((*umem)->page_size), &buf->mtt);
+                           (*umem)->page_shift, &buf->mtt);
        if (err)
                goto err_buf;
 
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 433bcdbdd680534bb757a64080118446059ed814..e6f77f63da75f0f84054c0e93674067c01589bfa 100644
@@ -107,7 +107,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
-                               umem->page_size * k;
+                               (k << umem->page_shift);
                        /*
                         * Be friendly to mlx4_write_mtt() and
                         * pass it chunks of appropriate size.
@@ -155,7 +155,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        n = ib_umem_page_count(mr->umem);
-       shift = ilog2(mr->umem->page_size);
+       shift = mr->umem->page_shift;
 
        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
@@ -239,7 +239,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
-               shift = ilog2(mmr->umem->page_size);
+               shift = mmr->umem->page_shift;
 
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c34eebc7db65a9bbf4deb0ce611e6ec61e88af7f..8f382318f88831f99646018c3d547cfe7e6cc8b7 100644
@@ -745,7 +745,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                }
 
                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-                                   ilog2(qp->umem->page_size), &qp->mtt);
+                                   qp->umem->page_shift, &qp->mtt);
                if (err)
                        goto err_buf;
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 7dd3f267f06b2393f1ba0ac24cc4555c2aa39002..e32dd58937a821a914c3e7d79d2d4482b158db47 100644
@@ -122,7 +122,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                }
 
                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-                                   ilog2(srq->umem->page_size), &srq->mtt);
+                                   srq->umem->page_shift, &srq->mtt);
                if (err)
                        goto err_buf;
 
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 778d8a18925f909d7f65e8e47f329f0f4ad8f44f..a0c2af96424916510107381c0f17c31e0dc9bc0c 100644
@@ -59,7 +59,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
        u64 pfn;
        struct scatterlist *sg;
        int entry;
-       unsigned long page_shift = ilog2(umem->page_size);
+       unsigned long page_shift = umem->page_shift;
 
        /* With ODP we must always match OS page size. */
        if (umem->odp_data) {
@@ -156,7 +156,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                            int page_shift, size_t offset, size_t num_pages,
                            __be64 *pas, int access_flags)
 {
-       unsigned long umem_page_shift = ilog2(umem->page_size);
+       unsigned long umem_page_shift = umem->page_shift;
        int shift = page_shift - umem_page_shift;
        int mask = (1 << shift) - 1;
        int i, k, idx;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d7b12f0750e275afefe413b0b676c0ea65756356..3bfa3a9c3be03f5996ed8053bba48ebf5e91646e 100644
@@ -206,7 +206,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
         * but they will write 0s as well, so no difference in the end result.
         */
 
-       for (addr = start; addr < end; addr += (u64)umem->page_size) {
+       for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                /*
                 * Strive to write the MTTs in chunks, but avoid overwriting
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 22d0e6ee5af6aaed90c754ddb142ccee9d188867..e1b8940558d230dd2d5b9e57fc00abb6499b5854 100644
@@ -937,7 +937,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                goto err;
        }
 
-       shift = ffs(mr->umem->page_size) - 1;
+       shift = mr->umem->page_shift;
        n = mr->umem->nmap;
 
        mr->mtt = mthca_alloc_mtt(dev, n);
@@ -959,8 +959,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
-                       pages[i++] = sg_dma_address(sg) +
-                               mr->umem->page_size * k;
+                       pages[i++] = sg_dma_address(sg) + (k << shift);
                        /*
                         * Be friendly to write_mtt and pass it chunks
                         * of appropriate size.
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ccf0a4cffe9c1b359deceed34b939181b161b110..11f7c308c7ad4f285d112edc12a6615877c3274d 100644
@@ -2165,9 +2165,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
-                       " offset = %u, page size = %u.\n",
+                       " offset = %u, page size = %lu.\n",
                        (unsigned long int)start, (unsigned long int)virt, (u32)length,
-                       ib_umem_offset(region), region->page_size);
+                       ib_umem_offset(region), BIT(region->page_shift));
 
        skip_pages = ((u32)ib_umem_offset(region)) >> 12;
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c52edeafd616a3be52fec913430405e5adff13d2..c57e387b55a24690b025dce17332786a77bba40c 100644
@@ -914,21 +914,18 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;
 
-       shift = ilog2(umem->page_size);
+       shift = umem->page_shift;
 
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->pa_lo =
-                           cpu_to_le32(sg_dma_address
-                                       (sg) +
-                                       (umem->page_size * pg_cnt));
+                           cpu_to_le32(sg_dma_address(sg) +
+                                       (pg_cnt << shift));
                        pbe->pa_hi =
-                           cpu_to_le32(upper_32_bits
-                                       ((sg_dma_address
-                                         (sg) +
-                                         umem->page_size * pg_cnt)));
+                           cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
+                                        (pg_cnt << shift)));
                        pbe_cnt += 1;
                        total_num_pbes += 1;
                        pbe++;
@@ -978,7 +975,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
        if (status)
                goto umem_err;
 
-       mr->hwmr.pbe_size = mr->umem->page_size;
+       mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 2091902848e6c47bb1f3dfdc4cc08cec99386dc0..49b7edc42adc5a38d616ea0bf21f4047507ecb04 100644
@@ -681,16 +681,16 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
        pbe_cnt = 0;
 
-       shift = ilog2(umem->page_size);
+       shift = umem->page_shift;
 
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-                                             umem->page_size * pg_cnt);
+                                             (pg_cnt << shift));
                        addr = upper_32_bits(sg_dma_address(sg) +
-                                            umem->page_size * pg_cnt);
+                                            (pg_cnt << shift));
                        pbe->hi = cpu_to_le32(addr);
                        pbe_cnt++;
                        total_num_pbes++;
@@ -2190,7 +2190,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
        mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
        mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
        mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
-       mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+       mr->hw_mr.page_size_log = mr->umem->page_shift;
        mr->hw_mr.fbo = ib_umem_offset(mr->umem);
        mr->hw_mr.length = len;
        mr->hw_mr.vaddr = usr_addr;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
index 948b5ccd2a70aa79a56798e5b397267b1f2cf54e..6ef4df6c8c4a86d4b1767caf94f181fac74a43b2 100644
@@ -194,7 +194,7 @@ int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (j = 0; j < len; j++) {
                        dma_addr_t addr = sg_dma_address(sg) +
-                                         umem->page_size * j;
+                                         (j << umem->page_shift);
 
                        ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
                        if (ret)
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 7c869555cf73f0a2b4cd24520e87f1c529dfd6ce..aa5f9ea318e45ab0ecd7169c0f14a675ffc121a3 100644
@@ -408,8 +408,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;
 
-       if (is_power_of_2(umem->page_size))
-               mr->mr.page_shift = ilog2(umem->page_size);
+       mr->mr.page_shift = umem->page_shift;
        m = 0;
        n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -421,8 +420,9 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                        goto bail_inval;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
-               mr->mr.map[m]->segs[n].length = umem->page_size;
-               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
+               mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
+               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
+                                     BIT(umem->page_shift));
                n++;
                if (n == RVT_SEGSZ) {
                        m++;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 154c3ee211aeb8e85913a20e15c028a4a72dddf8..ced15c4446bd90b4c8d13abaf21925423e7f4345 100644
@@ -191,10 +191,8 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
                goto err1;
        }
 
-       WARN_ON_ONCE(!is_power_of_2(umem->page_size));
-
-       mem->page_shift         = ilog2(umem->page_size);
-       mem->page_mask          = umem->page_size - 1;
+       mem->page_shift         = umem->page_shift;
+       mem->page_mask          = BIT(umem->page_shift) - 1;
 
        num_buf                 = 0;
        map                     = mem->map;
@@ -210,7 +208,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
                        }
 
                        buf->addr = (uintptr_t)vaddr;
-                       buf->size = umem->page_size;
+                       buf->size = BIT(umem->page_shift);
                        num_buf++;
                        buf++;
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 2d83cfd7e6ce20da3b1606cfe4335fada792fb51..7f4af1e1ae64d4e5d67d33d3f13ac9342bac52f5 100644
@@ -44,7 +44,7 @@ struct ib_umem {
        struct ib_ucontext     *context;
        size_t                  length;
        unsigned long           address;
-       int                     page_size;
+       int                     page_shift;
        int                     writable;
        int                     hugetlb;
        struct work_struct      work;
@@ -60,7 +60,7 @@ struct ib_umem {
 /* Returns the offset of the umem start relative to the first page. */
 static inline int ib_umem_offset(struct ib_umem *umem)
 {
-       return umem->address & ((unsigned long)umem->page_size - 1);
+       return umem->address & (BIT(umem->page_shift) - 1);
 }
 
 /* Returns the first page of an ODP umem. */
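
A quick worked example of the rewritten ib_umem_offset() above, assuming
the common 4 KiB pages (PAGE_SHIFT == 12, so umem->page_shift == 12):

  BIT(umem->page_shift) - 1   /* (1UL << 12) - 1 = 0xfff     */
  umem->address = 0x12345678;
  ib_umem_offset(umem);       /* 0x12345678 & 0xfff = 0x678  */

The result is the byte offset of the umem start within its first page,
identical to what the old (page_size - 1) mask computed.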