/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define DMA_MODE_READ		0
#define DMA_MODE_WRITE		1
#define DMA_MODE_MASK		1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */
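/*
 * Each channel owns a 0x40-byte register window, so the per-channel
 * macros above resolve to "base + (channel << 6)". Worked out by hand
 * from the macros (illustration only):
 *
 *	DMA_SAR(0)   = 0x80 + (0 << 6)  = 0x080
 *	DMA_CCR(2)   = 0x8c + (2 << 6)  = 0x10c
 *	DMA_RSSR(15) = 0x90 + (15 << 6) = 0x450
 */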
#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: clients context data for callbacks
 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * Structure is used for i.MX DMA processing. It would probably be good to
 * use @struct dma_struct in the future for external interfacing and use
 * @struct imxdma_channel_internal only as an extension to it.
 */
struct imxdma_channel_internal {
	unsigned int dma_mode;
	struct scatterlist *sg;
	unsigned int resbytes;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};
struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	unsigned int dmamode;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};
struct imxdma_channel {
	struct imxdma_channel_internal	internal;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
};
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;
static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}
static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_channel *imxdmac,
				 struct scatterlist *sg)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}
static int
imxdma_setup_single_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
		       unsigned int dma_length, unsigned int dev_addr,
		       unsigned int dmamode)
{
	int channel = imxdmac->channel;

	imxdmac->internal.sg = NULL;
	imxdmac->internal.dma_mode = dmamode;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(dma_address, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device,
				 DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dma_address, DMA_SAR(channel));
		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device,
				 DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
static void imxdma_enable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdmac->internal.sg &&
			imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;

			imxdma_sg_next(imxdmac, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}
static int
imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port,
			 unsigned int config_mem, unsigned int dmareq,
			 int hw_chaining)
{
	int channel = imxdmac->channel;
	u32 dreq = 0;

	imxdmac->internal.hw_chaining = 0;

	if (hw_chaining) {
		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}
static int
imxdma_setup_sg_hw(struct imxdma_channel *imxdmac,
		   struct scatterlist *sg, unsigned int sgcount,
		   unsigned int dma_length, unsigned int dev_addr,
		   unsigned int dmamode)
{
	int channel = imxdmac->channel;

	if (imxdmac->internal.in_use)
		return -EBUSY;

	imxdmac->internal.sg = sg;
	imxdmac->internal.dma_mode = dmamode;
	imxdmac->internal.resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device,
				 DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device,
				 DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imxdma_sg_next(imxdmac, sg);

	return 0;
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;

	if (imxdma->sg) {
		u32 tmp;

		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			imxdma_sg_next(imxdmac, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imxdma_config_channel_hw(imxdmac,
				d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imxdma_setup_single_hw(imxdmac, d->src,
					d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;

	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->dmamode == DMA_MODE_READ)
			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
				       d->sgcount, d->len, d->src, d->dmamode);
		else
			ret = imxdma_setup_sg_hw(imxdmac, d->sg,
				      d->sgcount, d->len, d->dest, d->dmamode);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(imxdmac);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imxdma_config_channel_hw(imxdmac,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;
		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop: chain the last entry back to the first one */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	if (direction == DMA_DEV_TO_MEM) {
		desc->dmamode = DMA_MODE_READ;
		desc->src = imxdmac->per_address;
	} else {
		desc->dmamode = DMA_MODE_WRITE;
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
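/*
 * Sketch of why the closed ring above gives cyclic behaviour (assumes
 * standard scatterlist chaining semantics): entry [periods] is a chain
 * entry (bit 0 of page_link set, "last" bit cleared) that points back at
 * entry [0], so the walk in dma_irq_handle_channel() never runs off the
 * end:
 *
 *	struct scatterlist *sg = &imxdmac->sg_list[periods - 1];
 *	sg = sg_next(sg);	sg now points at sg_list[0] again
 *
 * Together with desc->len = IMX_DMA_LENGTH_LOOP, which stops
 * imxdma_sg_next() from decrementing resbytes, every period reprograms
 * the full period_len until the channel is terminated.
 */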
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%zu\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->dmamode = DMA_MODE_WRITE;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
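/*
 * Hypothetical memcpy-offload sketch (dma_dest/dma_src are illustrative,
 * already-mapped bus addresses); device_prep_dma_memcpy is the hook this
 * driver registers in imxdma_probe():
 *
 *	desc = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *						    len, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */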
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return -ENODEV;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	/* allocate the engine first so it can be used as the IRQ dev_id */
	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			goto err_kfree;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0,
				  "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, imxdma);
			goto err_kfree;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}
err_kfree:
	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, imxdma);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, imxdma);
		free_irq(MX1_DMA_ERR, imxdma);
	}

	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");