rds: ib: unmap the scatter/gather list when error
authorZhu Yanjun <yanjun.zhu@oracle.com>
Mon, 13 Mar 2017 05:43:48 +0000 (01:43 -0400)
committerDavid S. Miller <davem@davemloft.net>
Tue, 14 Mar 2017 06:20:05 +0000 (23:20 -0700)
When an error occurs, the scatter/gather list that was mapped to DMA
addresses should be unmapped before returning, to avoid leaking the mapping.

Cc: Joe Jin <joe.jin@oracle.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/rds/ib_fmr.c

index c936b0d47693edb3e1ab73fa82ba10bc20490da8..86ef907067bb084e01ac4f8d5f00d0c17f40ac55 100644 (file)
@@ -112,29 +112,39 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
                if (dma_addr & ~PAGE_MASK) {
-                       if (i > 0)
+                       if (i > 0) {
+                               ib_dma_unmap_sg(dev, sg, nents,
+                                               DMA_BIDIRECTIONAL);
                                return -EINVAL;
-                       else
+                       } else {
                                ++page_cnt;
+                       }
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
-                       if (i < sg_dma_len - 1)
+                       if (i < sg_dma_len - 1) {
+                               ib_dma_unmap_sg(dev, sg, nents,
+                                               DMA_BIDIRECTIONAL);
                                return -EINVAL;
-                       else
+                       } else {
                                ++page_cnt;
+                       }
                }
 
                len += dma_len;
        }
 
        page_cnt += len >> PAGE_SHIFT;
-       if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+       if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -EINVAL;
+       }
 
        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
-       if (!dma_pages)
+       if (!dma_pages) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                return -ENOMEM;
+       }
 
        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
@@ -147,8 +157,10 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
        }
 
        ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
-       if (ret)
+       if (ret) {
+               ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
                goto out;
+       }
 
        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping.