RDMA/cxgb4: Map pbl buffers for DMA if using DSGL.
authorVipul Pandya <vipul@chelsio.com>
Thu, 14 Mar 2013 05:09:02 +0000 (05:09 +0000)
committerDavid S. Miller <davem@davemloft.net>
Thu, 14 Mar 2013 15:35:59 +0000 (11:35 -0400)
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/infiniband/hw/cxgb4/mem.c

index 33db9ee307dc204ed370b85e5b667a41a0e30190..4cb8eb24497cfa61821b4863c4026ff5a061405a 100644 (file)
@@ -51,7 +51,7 @@ module_param(inline_threshold, int, 0644);
 MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
-                                      u32 len, void *data, int wait)
+                                      u32 len, dma_addr_t data, int wait)
 {
        struct sk_buff *skb;
        struct ulp_mem_io *req;
@@ -88,7 +88,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE(1));
        sgl->len0 = cpu_to_be32(len);
-       sgl->addr0 = cpu_to_be64(virt_to_phys(data));
+       sgl->addr0 = cpu_to_be64(data);
 
        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
@@ -178,6 +178,13 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
        u32 remain = len;
        u32 dmalen;
        int ret = 0;
+       dma_addr_t daddr;
+       dma_addr_t save;
+
+       daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+               return -1;
+       save = daddr;
 
        while (remain > inline_threshold) {
                if (remain < T4_ULPTX_MAX_DMA) {
@@ -188,16 +195,18 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
                } else
                        dmalen = T4_ULPTX_MAX_DMA;
                remain -= dmalen;
-               ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, data,
+               ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
                                                 !remain);
                if (ret)
                        goto out;
                addr += dmalen >> 5;
                data += dmalen;
+               daddr += dmalen;
        }
        if (remain)
                ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
 out:
+       dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
        return ret;
 }
 
@@ -209,9 +218,17 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                             void *data)
 {
        if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
-               if (len > inline_threshold)
-                       return _c4iw_write_mem_dma(rdev, addr, len, data);
-               else
+               if (len > inline_threshold) {
+                       if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+                               printk_ratelimited(KERN_WARNING
+                                                  "%s: dma map"
+                                                  " failure (non fatal)\n",
+                                                  pci_name(rdev->lldi.pdev));
+                               return _c4iw_write_mem_inline(rdev, addr, len,
+                                                             data);
+                       } else
+                               return 0;
+               } else
                        return _c4iw_write_mem_inline(rdev, addr, len, data);
        } else
                return _c4iw_write_mem_inline(rdev, addr, len, data);