IB/core, RDMA RW API: Do not exceed QP SGE send limit
author Bart Van Assche <bart.vanassche@sandisk.com>
Thu, 21 Jul 2016 20:03:30 +0000 (13:03 -0700)
committer Doug Ledford <dledford@redhat.com>
Tue, 2 Aug 2016 16:02:41 +0000 (12:02 -0400)
Compute the SGE limits for RDMA READ and WRITE requests in
ib_create_qp() and use those limits in the RDMA RW API implementation
instead of the device-wide max_sge and max_sge_rd attributes.
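
Background, as a hedged illustration rather than part of the patch: RDMA
READ and WRITE requests are posted on the send queue, so the number of
SGEs per work request is bounded by the QP's send SGE limit, not only by
the device-wide attributes that the removed rdma_rw_max_sge() helper
consulted. A minimal sketch of the arithmetic using the per-QP fields
this patch introduces (nr_rw_wrs() is a hypothetical helper, not part of
the patch):

#include <linux/kernel.h>          /* DIV_ROUND_UP() */
#include <linux/dma-direction.h>   /* enum dma_data_direction */
#include <rdma/ib_verbs.h>

/* How many RDMA READ/WRITE WRs are needed to map @sg_cnt SG entries? */
static u32 nr_rw_wrs(struct ib_qp *qp, u32 sg_cnt,
                     enum dma_data_direction dir)
{
        u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
                      qp->max_read_sge;

        return DIV_ROUND_UP(sg_cnt, max_sge);
}

With hypothetical device limits attrs.max_sge = 32 and attrs.max_sge_rd = 8,
and a QP created with cap.max_send_sge = 16, a 30-entry SG list maps to
DIV_ROUND_UP(30, 16) = 2 WRITE WRs and DIV_ROUND_UP(30, 8) = 4 READ WRs;
the old device-wide limit of 32 would have allowed a single 30-SGE WRITE
WR, exceeding what the QP's send queue accepts.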

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Parav Pandit <pandit.parav@gmail.com>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: <stable@vger.kernel.org> #v4.7+
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/core/rw.c
drivers/infiniband/core/verbs.c
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 1ad2baaa6c8cef2848df2374b07ac947bb96c93a..dbfd854c32c936b8e934bcc1cb27ce4325b61d8d 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -58,13 +58,6 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
        return false;
 }
 
-static inline u32 rdma_rw_max_sge(struct ib_device *dev,
-               enum dma_data_direction dir)
-{
-       return dir == DMA_TO_DEVICE ?
-               dev->attrs.max_sge : dev->attrs.max_sge_rd;
-}
-
 static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
 {
        /* arbitrary limit to avoid allocating gigantic resources */
@@ -186,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
        struct ib_device *dev = qp->pd->device;
-       u32 max_sge = rdma_rw_max_sge(dev, dir);
+       u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
+                     qp->max_read_sge;
        struct ib_sge *sge;
        u32 total_len = 0, i, j;
 
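For context, a hedged sketch of how a ULP reaches this path: the DMA
direction passed to rdma_rw_ctx_init() now selects the per-QP limit above
(ctx, port_num, sgl, sg_cnt, remote_addr and rkey are placeholder
variables):

        struct rdma_rw_ctx ctx;
        int ret;

        /* DMA_FROM_DEVICE means an RDMA READ, capped at qp->max_read_sge. */
        ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;
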
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6298f54b413756a5bf0f19891a080739ec6300ef..e39a0b59723426ce0957f48092f369c4f319308a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
                }
        }
 
+       /*
+        * Note: all hw drivers guarantee that max_send_sge is lower than
+        * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+        * max_send_sge <= max_sge_rd.
+        */
+       qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+       qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+                                device->attrs.max_sge_rd);
+
        return qp;
 }
 EXPORT_SYMBOL(ib_create_qp);
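
A hedged usage sketch: after this change, ULPs can read the two limits
straight from the QP instead of consulting device attributes (pd, cq and
the capability values below are illustrative):

        struct ib_qp_init_attr attr = {
                .send_cq     = cq,
                .recv_cq     = cq,
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .qp_type     = IB_QPT_RC,
                .cap = {
                        .max_send_wr  = 128,
                        .max_recv_wr  = 128,
                        .max_send_sge = 16,
                        .max_recv_sge = 1,
                },
        };
        struct ib_qp *qp = ib_create_qp(pd, &attr);

        if (IS_ERR(qp))
                return PTR_ERR(qp);
        /*
         * qp->max_write_sge == attr.cap.max_send_sge; qp->max_read_sge
         * may be smaller on devices where attrs.max_sge_rd is below
         * cap.max_send_sge.
         */
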
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 7e440d41487aa8671fbb8952fb01e2bbf63b11f0..e694f02d42e3dc70ca0b20747404da15a8ed3229 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1428,6 +1428,10 @@ struct ib_srq {
        } ext;
 };
 
+/*
+ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+ * @max_read_sge:  Maximum SGE elements per RDMA READ request.
+ */
 struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
@@ -1449,6 +1453,8 @@ struct ib_qp {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
+       u32                     max_write_sge;
+       u32                     max_read_sge;
        enum ib_qp_type         qp_type;
 };
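
To summarize the invariants the new fields establish (illustrative
WARN_ON()-style checks, not part of the patch; init_attr is the
ib_qp_init_attr the QP was created with):

        WARN_ON(qp->max_write_sge != init_attr->cap.max_send_sge);
        WARN_ON(qp->max_read_sge !=
                min_t(u32, init_attr->cap.max_send_sge,
                      qp->device->attrs.max_sge_rd));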