dmaengine: rcar-dmac: add iommu support for slave transfers
author:    Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
           Wed, 10 Aug 2016 11:22:19 +0000 (13:22 +0200)
committer: Vinod Koul <vinod.koul@intel.com>
           Mon, 26 Sep 2016 16:46:41 +0000 (22:16 +0530)
Enable slave transfers to a device behind an IPMMU by mapping the slave
addresses using the dma-mapping API.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
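
For reference, a minimal sketch (not part of the commit) of the dma-mapping
calls the patch builds on, assuming a DMA controller device dma_dev and a
slave FIFO register at fifo_phys spanning fifo_size bytes; the helpers
map_slave_fifo()/unmap_slave_fifo() and their parameter names are invented
for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Illustrative only: translate the physical address of a slave device
 * register into a DMA address usable by the DMA controller. With an
 * IOMMU (such as the IPMMU) between the DMA controller and the bus,
 * dma_map_resource() sets up the required IOMMU mapping.
 */
static int map_slave_fifo(struct device *dma_dev, phys_addr_t fifo_phys,
			  size_t fifo_size, enum dma_data_direction dir,
			  dma_addr_t *dma)
{
	*dma = dma_map_resource(dma_dev, fifo_phys, fifo_size, dir, 0);
	if (dma_mapping_error(dma_dev, *dma))
		return -EIO;

	return 0;
}

static void unmap_slave_fifo(struct device *dma_dev, dma_addr_t dma,
			     size_t fifo_size, enum dma_data_direction dir)
{
	/* Release the mapping once the slave address is no longer in use. */
	dma_unmap_resource(dma_dev, dma, fifo_size, dir, 0);
}

The patch caches such a mapping per channel in the new struct
rcar_dmac_chan_map and reuses it as long as the slave address, transfer size
and direction are unchanged, so the prep callbacks do not remap the slave
register for every transfer.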
drivers/dma/sh/rcar-dmac.c

index cf983a9df3f11a17d968b1899fdfed4c0eec5926..22a5e406a589a35247436136e0255b14e1445055 100644
@@ -127,6 +127,18 @@ struct rcar_dmac_chan_slave {
        unsigned int xfer_size;
 };
 
+/*
+ * struct rcar_dmac_chan_map - Map of slave device phys to dma address
+ * @addr: slave dma address
+ * @dir: direction of mapping
+ * @slave: slave configuration that is mapped
+ */
+struct rcar_dmac_chan_map {
+       dma_addr_t addr;
+       enum dma_data_direction dir;
+       struct rcar_dmac_chan_slave slave;
+};
+
 /*
  * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
  * @chan: base DMA channel object
@@ -152,6 +164,7 @@ struct rcar_dmac_chan {
 
        struct rcar_dmac_chan_slave src;
        struct rcar_dmac_chan_slave dst;
+       struct rcar_dmac_chan_map map;
        int mid_rid;
 
        spinlock_t lock;
@@ -1029,13 +1042,65 @@ rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                                      DMA_MEM_TO_MEM, flags, false);
 }
 
+static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
+                                   enum dma_transfer_direction dir)
+{
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+       struct rcar_dmac_chan_map *map = &rchan->map;
+       phys_addr_t dev_addr;
+       size_t dev_size;
+       enum dma_data_direction dev_dir;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = rchan->src.slave_addr;
+               dev_size = rchan->src.xfer_size;
+               dev_dir = DMA_TO_DEVICE;
+       } else {
+               dev_addr = rchan->dst.slave_addr;
+               dev_size = rchan->dst.xfer_size;
+               dev_dir = DMA_FROM_DEVICE;
+       }
+
+       /* Reuse current map if possible. */
+       if (dev_addr == map->slave.slave_addr &&
+           dev_size == map->slave.xfer_size &&
+           dev_dir == map->dir)
+               return 0;
+
+       /* Remove old mapping if present. */
+       if (map->slave.xfer_size)
+               dma_unmap_resource(chan->device->dev, map->addr,
+                                  map->slave.xfer_size, map->dir, 0);
+       map->slave.xfer_size = 0;
+
+       /* Create new slave address map. */
+       map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
+                                    dev_dir, 0);
+
+       if (dma_mapping_error(chan->device->dev, map->addr)) {
+               dev_err(chan->device->dev,
+                       "chan%u: failed to map %zx@%pap", rchan->index,
+                       dev_size, &dev_addr);
+               return -EIO;
+       }
+
+       dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
+               rchan->index, dev_size, &dev_addr, &map->addr,
+               dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
+
+       map->slave.slave_addr = dev_addr;
+       map->slave.xfer_size = dev_size;
+       map->dir = dev_dir;
+
+       return 0;
+}
+
 static struct dma_async_tx_descriptor *
 rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_transfer_direction dir,
                        unsigned long flags, void *context)
 {
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
-       dma_addr_t dev_addr;
 
        /* Someone calling slave DMA on a generic channel? */
        if (rchan->mid_rid < 0 || !sg_len) {
@@ -1045,9 +1110,10 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                return NULL;
        }
 
-       dev_addr = dir == DMA_DEV_TO_MEM
-                ? rchan->src.slave_addr : rchan->dst.slave_addr;
-       return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+       if (rcar_dmac_map_slave_addr(chan, dir))
+               return NULL;
+
+       return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
                                      dir, flags, false);
 }
 
@@ -1061,7 +1127,6 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct dma_async_tx_descriptor *desc;
        struct scatterlist *sgl;
-       dma_addr_t dev_addr;
        unsigned int sg_len;
        unsigned int i;
 
@@ -1073,6 +1138,9 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                return NULL;
        }
 
+       if (rcar_dmac_map_slave_addr(chan, dir))
+               return NULL;
+
        sg_len = buf_len / period_len;
        if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
                dev_err(chan->device->dev,
@@ -1100,9 +1168,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                sg_dma_len(&sgl[i]) = period_len;
        }
 
-       dev_addr = dir == DMA_DEV_TO_MEM
-                ? rchan->src.slave_addr : rchan->dst.slave_addr;
-       desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+       desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
                                      dir, flags, true);
 
        kfree(sgl);