IB/core: generic RDMA READ/WRITE API
author     Christoph Hellwig <hch@lst.de>
Tue, 3 May 2016 16:01:09 +0000 (18:01 +0200)
committer  Doug Ledford <dledford@redhat.com>
Fri, 13 May 2016 17:37:19 +0000 (13:37 -0400)
This supports both manual mapping of lots of SGEs and using MRs from the
QP's MR pool, for iWARP or other cases where memory registration is more
efficient.  For now, MRs are only used for iWARP transports.  The user of
the RDMA READ/WRITE API must allocate the QP MR pool as well as size the
SQ accordingly.
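
Not part of this patch, but a rough sketch of how a consumer might use the
new API.  First, sizing the QP; the queue depth, PD, CQ and port number
below are placeholders:

	struct ib_qp_init_attr init_attr = { };
	struct ib_qp *qp;

	init_attr.cap.max_send_wr   = queue_depth;  /* the ULP's own SEND WRs */
	init_attr.cap.max_rdma_ctxs = queue_depth;  /* one rdma_rw_ctx per I/O */
	init_attr.cap.max_recv_wr   = queue_depth;
	init_attr.cap.max_send_sge  = 1;
	init_attr.cap.max_recv_sge  = 1;
	init_attr.port_num    = port_num;           /* required by the RW API */
	init_attr.send_cq     = cq;
	init_attr.recv_cq     = cq;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type     = IB_QPT_RC;

	/* ib_create_qp() grows max_send_wr and fills the MR pool as needed */
	qp = ib_create_qp(pd, &init_attr);

And the per-I/O path; the 'req' container, its fields and the completion
handler are made up for illustration:

	ret = rdma_rw_ctx_init(&req->rw, qp, port_num, req->sgl, req->nents,
			0 /* sg_offset */, remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	req->cqe.done = my_read_done;	/* invoked when the last WR completes */
	ret = rdma_rw_ctx_post(&req->rw, qp, port_num, &req->cqe, NULL);

	/* later, typically from my_read_done() or the error path: */
	rdma_rw_ctx_destroy(&req->rw, qp, port_num, req->sgl, req->nents,
			DMA_FROM_DEVICE);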

Thanks to Steve Wise for testing, fixing and rewriting the iWarp support,
and to Sagi Grimberg for ideas, reviews and fixes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/Makefile
drivers/infiniband/core/rw.c [new file with mode: 0644]
drivers/infiniband/core/verbs.c
include/rdma/ib_verbs.h
include/rdma/rw.h [new file with mode: 0644]

diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 48bd9d8292896aabf0738ff667959f0f58751a98..26987d9d7e1cdccd5922322e34fd31466d8c064d 100644
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_MAD) +=    ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=        ib_uverbs.o ib_ucm.o \
                                        $(user_access-y)
 
-ib_core-y :=                   packer.o ud_header.o verbs.o cq.o sysfs.o \
+ib_core-y :=                   packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
                                device.o fmr_pool.o cache.o netlink.o \
                                roce_gid_mgmt.o mr_pool.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
new file mode 100644
index 0000000..bd700ff
--- /dev/null
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <rdma/mr_pool.h>
+#include <rdma/rw.h>
+
+enum {
+       RDMA_RW_SINGLE_WR,
+       RDMA_RW_MULTI_WR,
+       RDMA_RW_MR,
+};
+
+static bool rdma_rw_force_mr;
+module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
+MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
+
+/*
+ * Check if the device might use memory registration.  This is currently only
+ * true for iWarp devices. In the future we can hopefully fine tune this based
+ * on HCA driver input.
+ */
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
+{
+       if (rdma_protocol_iwarp(dev, port_num))
+               return true;
+       if (unlikely(rdma_rw_force_mr))
+               return true;
+       return false;
+}
+
+/*
+ * Check if the device will use memory registration for this RW operation.
+ * We currently always use memory registrations for iWarp RDMA READs, and
+ * have a debug option to force usage of MRs.
+ *
+ * XXX: In the future we can hopefully fine tune this based on HCA driver
+ * input.
+ */
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+               enum dma_data_direction dir, int dma_nents)
+{
+       if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
+               return true;
+       if (unlikely(rdma_rw_force_mr))
+               return true;
+       return false;
+}
+
+static inline u32 rdma_rw_max_sge(struct ib_device *dev,
+               enum dma_data_direction dir)
+{
+       return dir == DMA_TO_DEVICE ?
+               dev->attrs.max_sge : dev->attrs.max_sge_rd;
+}
+
+static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
+{
+       /* arbitrary limit to avoid allocating gigantic resources */
+       return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
+}
+
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+               struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+               u32 sg_cnt, u32 offset)
+{
+       u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+       u32 nents = min(sg_cnt, pages_per_mr);
+       int count = 0, ret;
+
+       reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
+       if (!reg->mr)
+               return -EAGAIN;
+
+       if (reg->mr->need_inval) {
+               reg->inv_wr.opcode = IB_WR_LOCAL_INV;
+               reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
+               reg->inv_wr.next = &reg->reg_wr.wr;
+               count++;
+       } else {
+               reg->inv_wr.next = NULL;
+       }
+
+       ret = ib_map_mr_sg(reg->mr, sg, nents, offset, PAGE_SIZE);
+       if (ret < nents) {
+               ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
+               return -EINVAL;
+       }
+
+       reg->reg_wr.wr.opcode = IB_WR_REG_MR;
+       reg->reg_wr.mr = reg->mr;
+       reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+       if (rdma_protocol_iwarp(qp->device, port_num))
+               reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+       count++;
+
+       reg->sge.addr = reg->mr->iova;
+       reg->sge.length = reg->mr->length;
+       return count;
+}
+
+static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+               u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+               u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+       u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+       int i, j, ret = 0, count = 0;
+
+       ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+       ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
+       if (!ctx->reg) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < ctx->nr_ops; i++) {
+               struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
+               struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
+               u32 nents = min(sg_cnt, pages_per_mr);
+
+               ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
+                               offset);
+               if (ret < 0)
+                       goto out_free;
+               count += ret;
+
+               if (prev) {
+                       if (reg->mr->need_inval)
+                               prev->wr.wr.next = &reg->inv_wr;
+                       else
+                               prev->wr.wr.next = &reg->reg_wr.wr;
+               }
+
+               reg->reg_wr.wr.next = &reg->wr.wr;
+
+               reg->wr.wr.sg_list = &reg->sge;
+               reg->wr.wr.num_sge = 1;
+               reg->wr.remote_addr = remote_addr;
+               reg->wr.rkey = rkey;
+               if (dir == DMA_TO_DEVICE) {
+                       reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
+               } else if (!rdma_cap_read_inv(qp->device, port_num)) {
+                       reg->wr.wr.opcode = IB_WR_RDMA_READ;
+               } else {
+                       reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
+                       reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
+               }
+               count++;
+
+               remote_addr += reg->sge.length;
+               sg_cnt -= nents;
+               for (j = 0; j < nents; j++)
+                       sg = sg_next(sg);
+               offset = 0;
+       }
+
+       ctx->type = RDMA_RW_MR;
+       return count;
+
+out_free:
+       while (--i >= 0)
+               ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+       kfree(ctx->reg);
+out:
+       return ret;
+}
+
+static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+               struct scatterlist *sg, u32 sg_cnt, u32 offset,
+               u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+       struct ib_device *dev = qp->pd->device;
+       u32 max_sge = rdma_rw_max_sge(dev, dir);
+       struct ib_sge *sge;
+       u32 total_len = 0, i, j;
+
+       ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);
+
+       ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
+       if (!ctx->map.sges)
+               goto out;
+
+       ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
+       if (!ctx->map.wrs)
+               goto out_free_sges;
+
+       for (i = 0; i < ctx->nr_ops; i++) {
+               struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
+               u32 nr_sge = min(sg_cnt, max_sge);
+
+               if (dir == DMA_TO_DEVICE)
+                       rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+               else
+                       rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+               rdma_wr->remote_addr = remote_addr + total_len;
+               rdma_wr->rkey = rkey;
+               rdma_wr->wr.sg_list = sge;
+
+               for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
+                       rdma_wr->wr.num_sge++;
+
+                       sge->addr = ib_sg_dma_address(dev, sg) + offset;
+                       sge->length = ib_sg_dma_len(dev, sg) - offset;
+                       sge->lkey = qp->pd->local_dma_lkey;
+
+                       total_len += sge->length;
+                       sge++;
+                       sg_cnt--;
+                       offset = 0;
+               }
+
+               if (i + 1 < ctx->nr_ops)
+                       rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
+       }
+
+       ctx->type = RDMA_RW_MULTI_WR;
+       return ctx->nr_ops;
+
+out_free_sges:
+       kfree(ctx->map.sges);
+out:
+       return -ENOMEM;
+}
+
+static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+               struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
+               enum dma_data_direction dir)
+{
+       struct ib_device *dev = qp->pd->device;
+       struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
+
+       ctx->nr_ops = 1;
+
+       ctx->single.sge.lkey = qp->pd->local_dma_lkey;
+       ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
+       ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+
+       memset(rdma_wr, 0, sizeof(*rdma_wr));
+       if (dir == DMA_TO_DEVICE)
+               rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+       else
+               rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+       rdma_wr->wr.sg_list = &ctx->single.sge;
+       rdma_wr->wr.num_sge = 1;
+       rdma_wr->remote_addr = remote_addr;
+       rdma_wr->rkey = rkey;
+
+       ctx->type = RDMA_RW_SINGLE_WR;
+       return 1;
+}
+
+/**
+ * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
+ * @ctx:       context to initialize
+ * @qp:                queue pair to operate on
+ * @port_num:  port num to which the connection is bound
+ * @sg:                scatterlist to READ/WRITE from/to
+ * @sg_cnt:    number of entries in @sg
+ * @sg_offset: current byte offset into @sg
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey:      remote key to operate on
+ * @dir:       %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the QP's send queue
+ * if successful, or a negative error code.
+ */
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+               u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+       struct ib_device *dev = qp->pd->device;
+       int ret;
+
+       ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+       if (!ret)
+               return -ENOMEM;
+       sg_cnt = ret;
+
+       /*
+        * Skip to the S/G entry that sg_offset falls into:
+        */
+       for (;;) {
+               u32 len = ib_sg_dma_len(dev, sg);
+
+               if (sg_offset < len)
+                       break;
+
+               sg = sg_next(sg);
+               sg_offset -= len;
+               sg_cnt--;
+       }
+
+       ret = -EIO;
+       if (WARN_ON_ONCE(sg_cnt == 0))
+               goto out_unmap_sg;
+
+       if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
+               ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
+                               sg_offset, remote_addr, rkey, dir);
+       } else if (sg_cnt > 1) {
+               ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
+                               remote_addr, rkey, dir);
+       } else {
+               ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
+                               remote_addr, rkey, dir);
+       }
+
+       if (ret < 0)
+               goto out_unmap_sg;
+       return ret;
+
+out_unmap_sg:
+       ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+       return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_init);
+
+/*
+ * Now that we are going to post the WRs we can update the lkey and need_inval
+ * state on the MRs.  If we were doing this at init time, we would get double
+ * or missing invalidations if a context was initialized but not actually
+ * posted.
+ */
+static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
+{
+       reg->mr->need_inval = need_inval;
+       ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
+       reg->reg_wr.key = reg->mr->lkey;
+       reg->sge.lkey = reg->mr->lkey;
+}
+
+/**
+ * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
+ * @ctx:       context to operate on
+ * @qp:                queue pair to operate on
+ * @port_num:  port num to which the connection is bound
+ * @cqe:       completion queue entry for the last WR
+ * @chain_wr:  WR to append to the posted chain
+ *
+ * Return the WR chain for the set of RDMA READ/WRITE operations described by
+ * @ctx, as well as any memory registration operations needed.  If @chain_wr
+ * is non-NULL the WR it points to will be appended to the chain of WRs posted.
+ * If @chain_wr is not set @cqe must be set so that the caller gets a
+ * completion notification.
+ */
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+               u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+       struct ib_send_wr *first_wr, *last_wr;
+       int i;
+
+       switch (ctx->type) {
+       case RDMA_RW_MR:
+               for (i = 0; i < ctx->nr_ops; i++) {
+                       rdma_rw_update_lkey(&ctx->reg[i],
+                               ctx->reg[i].wr.wr.opcode !=
+                                       IB_WR_RDMA_READ_WITH_INV);
+               }
+
+               if (ctx->reg[0].inv_wr.next)
+                       first_wr = &ctx->reg[0].inv_wr;
+               else
+                       first_wr = &ctx->reg[0].reg_wr.wr;
+               last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
+               break;
+       case RDMA_RW_MULTI_WR:
+               first_wr = &ctx->map.wrs[0].wr;
+               last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
+               break;
+       case RDMA_RW_SINGLE_WR:
+               first_wr = &ctx->single.wr.wr;
+               last_wr = &ctx->single.wr.wr;
+               break;
+       default:
+               BUG();
+       }
+
+       if (chain_wr) {
+               last_wr->next = chain_wr;
+       } else {
+               last_wr->wr_cqe = cqe;
+               last_wr->send_flags |= IB_SEND_SIGNALED;
+       }
+
+       return first_wr;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_wrs);
+
+/**
+ * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
+ * @ctx:       context to operate on
+ * @qp:                queue pair to operate on
+ * @port_num:  port num to which the connection is bound
+ * @cqe:       completion queue entry for the last WR
+ * @chain_wr:  WR to append to the posted chain
+ *
+ * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
+ * any memory registration operations needed.  If @chain_wr is non-NULL the
+ * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
+ * is not set @cqe must be set so that the caller gets a completion
+ * notification.
+ */
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+       struct ib_send_wr *first_wr, *bad_wr;
+
+       first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
+       return ib_post_send(qp, first_wr, &bad_wr);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_post);
+
+/**
+ * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
+ * @ctx:       context to release
+ * @qp:                queue pair to operate on
+ * @port_num:  port num to which the connection is bound
+ * @sg:                scatterlist that was used for the READ/WRITE
+ * @sg_cnt:    number of entries in @sg
+ * @dir:       %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
+{
+       int i;
+
+       switch (ctx->type) {
+       case RDMA_RW_MR:
+               for (i = 0; i < ctx->nr_ops; i++)
+                       ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+               kfree(ctx->reg);
+               break;
+       case RDMA_RW_MULTI_WR:
+               kfree(ctx->map.wrs);
+               kfree(ctx->map.sges);
+               break;
+       case RDMA_RW_SINGLE_WR:
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy);
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
+{
+       u32 factor;
+
+       WARN_ON_ONCE(attr->port_num == 0);
+
+       /*
+        * Each context needs at least one RDMA READ or WRITE WR.
+        *
+        * For some hardware we might need more, eventually we should ask the
+        * HCA driver for a multiplier here.
+        */
+       factor = 1;
+
+       /*
+        * If the device needs MRs to perform RDMA READ or WRITE operations,
+        * each context needs two additional WRs for the MR registration and
+        * the invalidation.
+        */
+       if (rdma_rw_can_use_mr(dev, attr->port_num))
+               factor += 2;    /* inv + reg */
+
+       attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
+
+       /*
+        * But maybe we were just too high in the sky and the device doesn't
+        * even support all we need, and we'll have to live with what we get..
+        */
+       attr->cap.max_send_wr =
+               min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
+}
+
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+       struct ib_device *dev = qp->pd->device;
+       int ret = 0;
+
+       if (rdma_rw_can_use_mr(dev, attr->port_num)) {
+               ret = ib_mr_pool_init(qp, &qp->rdma_mrs,
+                               attr->cap.max_rdma_ctxs, IB_MR_TYPE_MEM_REG,
+                               rdma_rw_fr_page_list_len(dev));
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+void rdma_rw_cleanup_mrs(struct ib_qp *qp)
+{
+       ib_mr_pool_destroy(qp, &qp->rdma_mrs);
+}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 76c9c3faac202993fd714f795eb411705f90f397..566bfb31cadb7782b6140bbc5992e943ca6bde44 100644
@@ -48,6 +48,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_addr.h>
+#include <rdma/rw.h>
 
 #include "core_priv.h"
 
@@ -751,6 +752,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 {
        struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
        struct ib_qp *qp;
+       int ret;
+
+       /*
+        * If the caller is using the RDMA READ/WRITE API, calculate the
+        * resources needed for the RDMA READ/WRITE operations.
+        *
+        * Note that these callers need to pass in a port number.
+        */
+       if (qp_init_attr->cap.max_rdma_ctxs)
+               rdma_rw_init_qp(device, qp_init_attr);
 
        qp = device->create_qp(pd, qp_init_attr, NULL);
        if (IS_ERR(qp))
@@ -764,6 +775,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
        spin_lock_init(&qp->mr_lock);
+       INIT_LIST_HEAD(&qp->rdma_mrs);
 
        if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
                return ib_create_xrc_qp(qp, qp_init_attr);
@@ -787,6 +799,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 
        atomic_inc(&pd->usecnt);
        atomic_inc(&qp_init_attr->send_cq->usecnt);
+
+       if (qp_init_attr->cap.max_rdma_ctxs) {
+               ret = rdma_rw_init_mrs(qp, qp_init_attr);
+               if (ret) {
+                       pr_err("failed to init MR pool ret= %d\n", ret);
+                       ib_destroy_qp(qp);
+                       qp = ERR_PTR(ret);
+               }
+       }
+
        return qp;
 }
 EXPORT_SYMBOL(ib_create_qp);
@@ -1271,6 +1293,9 @@ int ib_destroy_qp(struct ib_qp *qp)
        rcq  = qp->recv_cq;
        srq  = qp->srq;
 
+       if (!qp->uobject)
+               rdma_rw_cleanup_mrs(qp);
+
        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                if (pd)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 3f66647749cafdb095c9f041003970d5ee9e8bd6..dd8e15dfc1a80461f4de55c93843e2208ae32b35 100644
@@ -931,6 +931,13 @@ struct ib_qp_cap {
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
+
+       /*
+        * Maximum number of rdma_rw_ctx structures in flight at a time.
+        * ib_create_qp() will calculate the right number of needed WRs
+        * and MRs based on this.
+        */
+       u32     max_rdma_ctxs;
 };
 
 enum ib_sig_type {
@@ -1002,7 +1009,11 @@ struct ib_qp_init_attr {
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;
-       u8                      port_num; /* special QP types only */
+
+       /*
+        * Only needed for special QP types, or when using the RW API.
+        */
+       u8                      port_num;
 };
 
 struct ib_qp_open_attr {
@@ -1423,6 +1434,7 @@ struct ib_qp {
        struct ib_cq           *recv_cq;
        spinlock_t              mr_lock;
        int                     mrs_used;
+       struct list_head        rdma_mrs;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
        struct list_head        xrcd_list;
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
new file mode 100644
index 0000000..d3896bb
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#ifndef _RDMA_RW_H
+#define _RDMA_RW_H
+
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/mr_pool.h>
+
+struct rdma_rw_ctx {
+       /* number of RDMA READ/WRITE WRs (not counting MR WRs) */
+       u32                     nr_ops;
+
+       /* tag for the union below: */
+       u8                      type;
+
+       union {
+               /* for mapping a single SGE: */
+               struct {
+                       struct ib_sge           sge;
+                       struct ib_rdma_wr       wr;
+               } single;
+
+               /* for mapping of multiple SGEs: */
+               struct {
+                       struct ib_sge           *sges;
+                       struct ib_rdma_wr       *wrs;
+               } map;
+
+               /* for registering multiple WRs: */
+               struct rdma_rw_reg_ctx {
+                       struct ib_sge           sge;
+                       struct ib_rdma_wr       wr;
+                       struct ib_reg_wr        reg_wr;
+                       struct ib_send_wr       inv_wr;
+                       struct ib_mr            *mr;
+               } *reg;
+       };
+};
+
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+               u64 remote_addr, u32 rkey, enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct scatterlist *sg, u32 sg_cnt,
+               enum dma_data_direction dir);
+
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+               u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+               struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
+void rdma_rw_cleanup_mrs(struct ib_qp *qp);
+
+#endif /* _RDMA_RW_H */