struct dma_slave_config slave_cfg;
int id; /* Request phy chan id */
u32 ccfg;
+ u32 cyclic; /* non-zero when the prepared transfer is cyclic */
struct virt_dma_chan vc;
struct zx_dma_phy *phy;
struct list_head node;
u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
- u32 i, irq_chan = 0;
+ u32 i, irq_chan = 0, task = 0;
while (tc) {
i = __ffs(tc);
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- vchan_cookie_complete(&p->ds_run->vd);
- p->ds_done = p->ds_run;
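+ /*
+ * A cyclic descriptor is reused in place, so only run the period
+ * callback for it; a one-shot descriptor is completed here and
+ * zx_dma_task() is called afterwards (task = 1) to start the next
+ * queued descriptor.
+ */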
+ if (c->cyclic) {
+ vchan_cyclic_callback(&p->ds_run->vd);
+ } else {
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ task = 1;
+ }
spin_unlock_irqrestore(&c->vc.lock, flags);
+ irq_chan |= BIT(i);
}
- irq_chan |= BIT(i);
}
if (serr || derr || cfg)
writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
- if (irq_chan) {
+ if (task)
zx_dma_task(d);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ return IRQ_HANDLED;
}
static void zx_dma_free_chan_resources(struct dma_chan *chan)
len -= copy;
} while (len);
+ c->cyclic = 0; /* memcpy is a one-shot transfer */
ds->desc_hw[num - 1].lli = 0; /* end of link */
ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
return vchan_tx_prep(&c->vc, &ds->vd, flags);
if (!ds)
return NULL;
+ c->cyclic = 0; /* slave_sg is a one-shot transfer */
num = 0;
for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg);
return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
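+/*
+ * Prepare a cyclic transfer: one hardware descriptor per period, each
+ * with the interrupt enabled (ZX_IRQ_ENABLE_ALL), and the last
+ * descriptor linked back to the first so the transfer loops until it
+ * is terminated.
+ */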
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ struct zx_dma_desc_sw *ds;
+ dma_addr_t src = 0, dst = 0;
+ int num_periods = buf_len / period_len;
+ int buf = 0, num = 0;
+
+ if (period_len > DMA_MAX_SIZE) {
+ dev_err(chan->device->dev, "maximum period size exceeded\n");
+ return NULL;
+ }
+
+ if (zx_pre_config(c, dir))
+ return NULL;
+
+ ds = zx_alloc_desc_resource(num_periods, chan);
+ if (!ds)
+ return NULL;
+ c->cyclic = 1;
+
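+ /* build one hardware descriptor per period */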
+ while (buf < buf_len) {
+ if (dir == DMA_MEM_TO_DEV) {
+ src = dma_addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = dma_addr;
+ }
+ zx_dma_fill_desc(ds, dst, src, period_len, num++,
+ c->ccfg | ZX_IRQ_ENABLE_ALL);
+ dma_addr += period_len;
+ buf += period_len;
+ }
+
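+ /* link the last descriptor back to the head of the list to form a ring */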
+ ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+ ds->size = buf_len;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
static int zx_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
return 0;
}
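+/* pause: clear ZX_CH_ENABLE to stop the channel; the rest of the channel setup is left as-is */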
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ u32 val = 0;
+
+ val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+ val &= ~ZX_CH_ENABLE;
+ writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+ return 0;
+}
+
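+/* resume: set ZX_CH_ENABLE again so the channel carries on */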
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+ struct zx_dma_chan *c = to_zx_chan(chan);
+ u32 val = 0;
+
+ val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+ val |= ZX_CH_ENABLE;
+ writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+ return 0;
+}
+
static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
struct zx_dma_desc_sw *ds =
d->slave.device_tx_status = zx_dma_tx_status;
d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
d->slave.device_issue_pending = zx_dma_issue_pending;
d->slave.device_config = zx_dma_config;
d->slave.device_terminate_all = zx_dma_terminate_all;
+ d->slave.device_pause = zx_dma_transfer_pause;
+ d->slave.device_resume = zx_dma_transfer_resume;
d->slave.copy_align = DMA_ALIGN;
d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;