INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
+ INIT_LIST_HEAD(&mchan->queued);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
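Taken together with the hunks below, the new list slots into the descriptor lifecycle roughly as follows (a sketch inferred from this patch, not literal driver code):

	/*
	 * device_prep_dma_memcpy()  -> mchan->prepared   descriptor built
	 * tx_submit()               -> mchan->queued     cookie assigned, hardware untouched
	 * device_issue_pending()    -> mchan->active     TRE pushed via hidma_ll_queue_request()
	 * transfer completion       -> mchan->completed  reaped and recycled later
	 */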
struct hidma_chan *mchan = to_hidma_chan(dmach);
struct hidma_dev *dmadev = mchan->dmadev;
unsigned long flags;
+ struct hidma_desc *qdesc, *next;
int status;
spin_lock_irqsave(&mchan->lock, flags);
+ list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
+ hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
+ list_move_tail(&qdesc->node, &mchan->active);
+ }
+
if (!mchan->running) {
struct hidma_desc *desc = list_first_entry(&mchan->active,
struct hidma_desc,
node);
mchan->running = desc;
}
spin_unlock_irqrestore(&mchan->lock, flags);

pm_runtime_get_sync(dmadev->ddev.dev);
if (!hidma_ll_isenabled(dmadev->lldev)) {
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return -ENODEV;
}
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
mdesc = container_of(txd, struct hidma_desc, desc);
spin_lock_irqsave(&mchan->lock, irqflags);
- /* Move descriptor to active */
- list_move_tail(&mdesc->node, &mchan->active);
+ /* Move descriptor to queued */
+ list_move_tail(&mdesc->node, &mchan->queued);
/* Update cookie */
cookie = dma_cookie_assign(txd);
- hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
spin_unlock_irqrestore(&mchan->lock, irqflags);
return cookie;
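The visible effect of the tx_submit() hunk: dmaengine_submit() now only parks the descriptor on the queued list and drops the runtime-PM reference it took; nothing reaches the hardware until the client calls dma_async_issue_pending(). A minimal client-side sketch using the generic dmaengine API (the function name and the chan, dst, src, len parameters are placeholders, not from this patch):

	#include <linux/dmaengine.h>

	static int example_submit(struct dma_chan *chan, dma_addr_t dst,
				  dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = dmaengine_submit(tx);	/* descriptor lands on 'queued' only */
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);	/* queued -> hardware, then 'active' */
		return 0;
	}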
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
+ list_splice_init(&mchan->queued, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* this suspends the existing transfer */
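Because descriptors can now sit on queued without ever having been handed to the hardware, the terminate path above splices that list as well, so such descriptors are released together with the active, prepared and completed ones. From a client's perspective this remains the ordinary teardown sequence (a sketch; names other than the dmaengine API are placeholders):

	#include <linux/dmaengine.h>

	static void example_teardown(struct dma_chan *chan)
	{
		/* stops the channel and reclaims anything still queued or active */
		dmaengine_terminate_sync(chan);
		dma_release_channel(chan);
	}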