From 40e64e07213201710a51e270595d6e6c028f9502 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 28 Jul 2016 18:04:09 +0300
Subject: [PATCH] nvmet-rdma: Don't use the inline buffer in order to avoid
 allocation for small reads

Under extreme conditions this might cause data corruption. By using the
inline buffer we repost the buffer and then post this same buffer for
the device to send. If we happen to use shared receive queues, the
device might write to the buffer before it sends it (there is no
ordering between send and recv queues). Without SRQs we probably won't
hit this as long as the host doesn't misbehave and send more than we
allowed it, but relying on that is not really a good idea.

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig
---
 drivers/nvme/target/rdma.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 48c811850c29..b4d648536c3e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -616,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	if (!len)
 		return 0;
 
-	/* use the already allocated data buffer if possible */
-	if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-		nvmet_rdma_use_inline_sg(rsp, len, 0);
-	} else {
-		status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-				len);
-		if (status)
-			return status;
-	}
+	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+			len);
+	if (status)
+		return status;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
-- 
2.20.1
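
For readers unfamiliar with the allocation path the patch now always
takes, below is a minimal sketch of what a page-backed SGL allocation
helper in the spirit of nvmet_rdma_alloc_sgl() can look like, built on
the generic kernel scatterlist API. This is only an illustration under
stated assumptions, not the driver's actual implementation: the helper
name example_alloc_sgl and the plain -ENOMEM error handling are
assumptions for the sketch (the real helper returns an NVMe status
code).

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>

/*
 * Allocate a scatterlist of freshly allocated pages covering 'length'
 * bytes. Sketch only: error handling and return codes differ from the
 * in-tree helper.
 */
static int example_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	unsigned int nent, i;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	for (i = 0; i < nent; i++) {
		u32 len = min_t(u32, length, PAGE_SIZE);
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			goto out_free_pages;

		/* each entry points at its own page, not the inline buffer */
		sg_set_page(&sg[i], page, len, 0);
		length -= len;
	}

	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0)
		__free_page(sg_page(&sg[--i]));
	kfree(sg);
	return -ENOMEM;
}

Because the data pages are now always private to the request, reposting
the receive buffer (inline data included) to the SRQ can no longer race
with the data transfer back to the host.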