/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of_dma.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
	return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
	return slave ? slave->src_master : 1;
}

static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	unsigned char mmax = dw->nr_masters - 1;

	if (dwc->request_line == ~0) {
		dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
		dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
	}
}
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dwc->dst_master)		\
		 | DWC_CTLL_SMS(_dwc->src_master));		\
	})
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64
/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
		else if (dwc->direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}
/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
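
/*
 * Worked example (editor's note, not in the original source): for
 * src = 0x1000, dest = 0x2000 and len = 0x40, the OR of the three values
 * is 0x3040; its low three bits are clear, so dwc_fast_fls() returns 3
 * and the copy may use 8-byte accesses. With len = 0x42 the OR ends in
 * binary ...10, so the function returns 1 and the width drops to 2 bytes.
 */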
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}
/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer "
				"inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback	callback = NULL;
	void			*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc		*child;
	unsigned long		flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!is_slave_direction(dwc->direction)) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
					desc->total_len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
					desc->total_len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
					desc->total_len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
					desc->total_len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
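
/*
 * Editor's note (not in the original source): CTL_HI.BLOCK_TS counts
 * completed source transfers and CTL_LO bits 6:4 hold SRC_TR_WIDTH as
 * log2(bytes). For example, BLOCK_TS = 0x10 with a 32-bit source width
 * (ctllo >> 4 & 7 == 2) means 0x10 * 4 = 64 bytes were already read.
 */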
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
			   dw->data_width[dwc->dst_master]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
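
/*
 * Editor's sketch (not part of the original file): how a client reaches
 * dwc_prep_dma_memcpy() through the generic dmaengine API. The channel,
 * addresses and length below are illustrative assumptions.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						    DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *
 *	cookie = dmaengine_submit(txd);		// ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);
 */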
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dw->data_width[dwc->src_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dw->data_width[dwc->dst_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
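
/*
 * Editor's note (not in the original source): fls(1) = 1, fls(4) = 3,
 * fls(8) = 4 and fls(16) = 5, so "fls(*maxburst) - 2" yields 1, 2 and 3
 * for bursts of 4, 8 and 16; the explicit *maxburst > 1 test is needed
 * because fls(1) - 2 would give -1 instead of the register value 0.
 */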
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	/* Take the request line from slave_id member */
	if (dwc->request_line == ~0)
		dwc->request_line = sconfig->slave_id;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
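
/*
 * Editor's sketch (not part of the original file): a slave client hits
 * set_runtime_config() via the generic helper. The FIFO address, width,
 * burst and request line below are made-up values for illustration.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= 0xd0012000,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *		.slave_id	= 3,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */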
static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);
	unsigned int count = 20;	/* timeout iterations */

	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_pause(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_resume(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc_chan_resume(dwc);

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	residue = dwc->residue;
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
		residue -= dwc_get_sent(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return residue;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_get_residue(dwc));

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
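
/*
 * Editor's sketch (not part of the original file): polling a submitted
 * transfer through the dmaengine core; "cookie" is assumed to come from
 * an earlier dmaengine_submit().
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_SUCCESS)
 *		pr_info("still running, %u bytes left\n", state.residue);
 */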
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	dwc_set_masters(dwc);

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		dma_addr_t phys;

		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
		if (!desc)
			goto err_desc_alloc;

		memset(desc, 0, sizeof(struct dw_desc));

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;

		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;

err_desc_alloc:
	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;
	dwc->request_line = ~0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/*----------------------------------------------------------------------*/

struct dw_dma_of_filter_args {
	struct dw_dma *dw;
	unsigned int req;
	unsigned int src;
	unsigned int dst;
};

static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_of_filter_args *fargs = param;

	/* Ensure the device matches our channel */
	if (chan->device != &fargs->dw->dma)
		return false;

	dwc->request_line = fargs->req;
	dwc->src_master	= fargs->src;
	dwc->dst_master	= fargs->dst;

	return true;
}
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_of_filter_args fargs = {
		.dw = dw,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	fargs.req = dma_spec->args[0];
	fargs.src = dma_spec->args[1];
	fargs.dst = dma_spec->args[2];

	if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
		    fargs.src >= dw->nr_masters ||
		    fargs.dst >= dw->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
}
#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct acpi_dma_spec *dma_spec = param;

	if (chan->device->dev != dma_spec->dev ||
	    chan->chan_id != dma_spec->chan_id)
		return false;

	dwc->request_line = dma_spec->slave_id;
	dwc->src_master = dwc_get_sms(NULL);
	dwc->dst_master = dwc_get_dms(NULL);

	return true;
}

static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* Setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
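
/*
 * Editor's sketch (not part of the original file): typical life cycle of
 * the cyclic API above, e.g. for an audio ring buffer. The buffer, sizes
 * and callback are illustrative assumptions.
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_ctx;
 *
 *	dw_dma_cyclic_start(chan);
 *	// stream runs; my_period_done() fires once per period
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */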
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[4];

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
		return NULL;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
		if (tmp > 4)
			return NULL;

		pdata->nr_masters = tmp;
	}

	if (!of_property_read_u32_array(np, "data_width", arr,
				pdata->nr_masters))
		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	void __iomem		*regs;
	bool			autocfg;
	unsigned int		dw_params;
	unsigned int		nr_channels;
	unsigned int		max_blk_size = 0;
	int			irq;
	int			err;
	int			i;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Apply default dma_mask if needed */
	if (!pdev->dev.dma_mask) {
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	dw_params = dma_read_byaddr(regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	if (!pdata && autocfg) {
		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	dw->regs = regs;

	/* Get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			       "dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];
		int			r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;
		dwc->request_line = ~0;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;

			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
						     DWC_PARAMS);

			dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					    dwc_params);

			/* Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095. */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	dma_async_device_register(&dw->dma);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(dw);

	return 0;
}
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);

	return 0;
}
static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
#endif

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");