/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_LENGTH_LOOP		((unsigned int)-1)
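/*
 * A transfer length of IMX_DMA_LENGTH_LOOP marks an endless (cyclic)
 * transfer: imxdma_sg_next() below never decrements the residue for it,
 * so the scatterlist ring is walked until the channel is stopped.
 */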
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
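/*
 * Each channel owns a 64-byte register window starting at 0x80, hence the
 * "(x) << 6" stride above. Note that DMA_RTOR and DMA_BUCR deliberately
 * share offset 0x98; which register lives there appears to depend on the
 * SoC variant.
 */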
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
/*
 * struct imxdma_channel_internal - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: client's context data for callbacks
 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower or same as sum of SG mapped chunk sizes)
 * @sgcount: number of chunks to be read/written
 *
 * The structure is used for i.MX DMA processing. It would probably be
 * better to use @struct dma_struct in the future for external interfacing
 * and keep @struct imxdma_channel_internal only as an extension to it.
 */
struct imxdma_channel_internal {
	struct scatterlist *sg;
	unsigned int resbytes;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	bool hw_chaining;

	struct timer_list watchdog;
};
struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};
struct imxdma_channel {
	struct imxdma_channel_internal internal;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};
struct imxdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_device;
	struct imxdma_channel channel[IMX_DMA_CHANNELS];
};
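/*
 * Descriptor lifecycle, as implemented by the functions below: descriptors
 * are preallocated onto ld_free in imxdma_alloc_chan_resources(), sit on
 * ld_queue once submitted, move to ld_active when the hardware has been
 * programmed, and return to ld_free from the completion tasklet. Cyclic
 * descriptors stay on ld_active until the channel is terminated.
 */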
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
/* TODO: put this inside any struct */
static void __iomem *imx_dmav1_baseaddr;
static struct clk *dma_clk;
static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}
static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}
static int imxdma_hw_chain(struct imxdma_channel_internal *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}
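/*
 * On parts that support it, hardware chaining (CCR_RPT/CCR_ACRPT) lets the
 * engine reload the channel with the next scatterlist chunk from the IRQ
 * path without fully stopping it; see imxdma_enable_hw() and
 * dma_irq_handle_channel() below.
 */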
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d, struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	unsigned long now;

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(DMA_CNTR(imxdmac->channel)));

	return now;
}
static int
imxdma_setup_mem2mem_hw(struct imxdma_channel *imxdmac, dma_addr_t dma_address,
			unsigned int dma_length, unsigned int dev_addr)
{
	int channel = imxdmac->channel;

	imxdmac->internal.sg = NULL;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
		 "dev_addr=0x%08x for write\n",
		 channel, __func__, (unsigned int)dma_address,
		 dma_length, dev_addr);

	imx_dmav1_writel(dma_address, DMA_SAR(channel));
	imx_dmav1_writel(dev_addr, DMA_DAR(channel));
	imx_dmav1_writel(imxdmac->internal.ccr_to_device,
			 DMA_CCR(channel));

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (imxdmac->internal.in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
	    imxdmac->internal.sg && imxdma_hw_chain(&imxdmac->internal)) {
		imxdmac->internal.sg = sg_next(imxdmac->internal.sg);
		if (imxdmac->internal.sg) {
			u32 tmp;

			imxdma_sg_next(d, imxdmac->internal.sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}
	imxdmac->internal.in_use = 1;

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(&imxdmac->internal))
		del_timer(&imxdmac->internal.watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			 DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdmac->internal.in_use = 0;
	local_irq_restore(flags);
}
static int
imxdma_config_channel_hw(struct imxdma_channel *imxdmac, unsigned int config_port,
			 unsigned int config_mem, unsigned int dmareq,
			 int hw_chaining)
{
	int channel = imxdmac->channel;
	u32 dreq = 0;

	imxdmac->internal.hw_chaining = 0;

	if (hw_chaining) {
		imxdmac->internal.hw_chaining = 1;
		if (!imxdma_hw_chain(&imxdmac->internal))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdmac->internal.ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdmac->internal.ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}
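/*
 * The CCR encoding explains the "<< 2" above: the per-side settings carry
 * the SSIZ bits (bits 4-5) and SMOD bits (bits 10-11), and shifting them
 * left by two turns them into the corresponding DSIZ (bits 6-7) and DMOD
 * (bits 12-13) fields, so the same IMX_DMA_MEMSIZE_* and IMX_DMA_TYPE_*
 * values work for either transfer direction.
 */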
static int
imxdma_setup_sg_hw(struct imxdma_desc *d,
		   struct scatterlist *sg, unsigned int sgcount,
		   unsigned int dma_length, unsigned int dev_addr,
		   enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int channel = imxdmac->channel;

	if (imxdmac->internal.in_use)
		return -EBUSY;

	imxdmac->internal.sg = sg;
	imxdmac->internal.resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if (direction == DMA_DEV_TO_MEM) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_from_device, DMA_CCR(channel));
	} else if (direction == DMA_MEM_TO_DEV) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdmac->internal.ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imxdma_sg_next(d, sg);

	return 0;
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	int channel = imxdmac->channel;

	imx_dmav1_writel(0, DMA_CCR(channel));
	imxdmac->internal.in_use = 0;
	imxdmac->internal.sg = NULL;

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR) |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		internal = &imxdma->channel[i].internal;
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_channel_internal *imxdma = &imxdmac->internal;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	if (imxdma->sg) {
		u32 tmp;

		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			spin_lock(&imxdmac->lock);
			if (list_empty(&imxdmac->ld_active)) {
				spin_unlock(&imxdmac->lock);
				goto out;
			}

			desc = list_first_entry(&imxdmac->ld_active,
						struct imxdma_desc,
						node);
			spin_unlock(&imxdmac->lock);

			imxdma_sg_next(desc, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imxdma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	struct imxdma_channel_internal *internal;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		 disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			internal = &imxdma->channel[i].internal;
			dma_irq_handle_channel(&imxdma->channel[i]);
		}
	}

	return IRQ_HANDLED;
}
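/*
 * Note the SoC split visible above and in probe() below: i.MX1 has a single
 * DMA interrupt line plus a separate error line, while i.MX21/27 route one
 * interrupt per channel and report errors through the same handler, which
 * is why dma_irq_handler() calls imxdma_err_handler() first on those parts.
 */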
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	int ret;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		ret = imxdma_config_channel_hw(imxdmac,
					       d->config_port, d->config_mem, 0, 0);
		if (ret < 0)
			return ret;
		ret = imxdma_setup_mem2mem_hw(imxdmac, d->src, d->len, d->dest);
		if (ret < 0)
			return ret;
		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		ret = imxdma_setup_sg_hw(d, d->sg, d->sgcount, d->len,
					 imxdmac->per_address, d->direction);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imxdma_config_channel_hw(imxdmac,
					       mode | IMX_DMA_TYPE_FIFO,
					       IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
					       imxdmac->dma_request, 1);

		if (ret)
			return ret;
		/* Set burst length */
		imx_dmav1_writel(imxdmac->watermark_level * imxdmac->word_size,
				 DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
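/*
 * Usage sketch (not part of this driver): a slave client would normally
 * reach the DMA_SLAVE_CONFIG case above through the generic dmaengine API.
 * The peripheral address and widths here are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,	// hypothetical FIFO
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */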
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}
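/*
 * tx_submit only assigns a cookie under the channel lock; the hardware is
 * not touched until imxdma_issue_pending() picks the descriptor up, in
 * keeping with the usual dmaengine prepare/submit/issue flow.
 */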
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		__memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}
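	/*
	 * Only the first scatterlist entry is checked against the configured
	 * bus width here; the caller is expected to hand in a list whose
	 * chunks all share the same alignment.
	 */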
	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}
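	/*
	 * Close the scatterlist into a ring: bit 0 of page_link marks the
	 * extra entry as a chain pointer back to the first element, and
	 * clearing bit 1 removes the end-of-list marker, so the emulation
	 * cycles through the periods forever (len is IMX_DMA_LENGTH_LOOP).
	 */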
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		memset(&imxdmac->internal, 0, sizeof(imxdmac->internal));
		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
						MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->internal.watchdog);
			imxdmac->internal.watchdog.function = &imxdma_watchdog;
			imxdmac->internal.watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");