/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR		0x00		/* Control Register */
#define DMA_DISR	0x04		/* Interrupt status Register */
#define DMA_DIMR	0x08		/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR	0x10		/* Request timeout Register */
#define DMA_DSESR	0x14		/* Transfer Error Status Register */
#define DMA_DBOSR	0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c		/* Burst timeout control Register */
#define DMA_WSRA	0x40		/* W-Size Register A */
#define DMA_XSRA	0x44		/* X-Size Register A */
#define DMA_YSRA	0x48		/* Y-Size Register A */
#define DMA_WSRB	0x4c		/* W-Size Register B */
#define DMA_XSRB	0x50		/* X-Size Register B */
#define DMA_YSRB	0x54		/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
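
/*
 * Added note (assumption, from the i.MX reference manuals rather than this
 * file): DMA_RTOR and DMA_BUCR deliberately share offset 0x98. The same
 * per-channel location serves as the request timeout register when the DMA
 * request line is enabled (CCR_REN set) and as the bus utilization register
 * otherwise.
 */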
77 #define DCR_DRST (1<<1)
78 #define DCR_DEN (1<<0)
79 #define DBTOCR_EN (1<<15)
80 #define DBTOCR_CNT(x) ((x) & 0x7fff)
81 #define CNTR_CNT(x) ((x) & 0xffffff)
82 #define CCR_ACRPT (1<<14)
83 #define CCR_DMOD_LINEAR (0x0 << 12)
84 #define CCR_DMOD_2D (0x1 << 12)
85 #define CCR_DMOD_FIFO (0x2 << 12)
86 #define CCR_DMOD_EOBFIFO (0x3 << 12)
87 #define CCR_SMOD_LINEAR (0x0 << 10)
88 #define CCR_SMOD_2D (0x1 << 10)
89 #define CCR_SMOD_FIFO (0x2 << 10)
90 #define CCR_SMOD_EOBFIFO (0x3 << 10)
91 #define CCR_MDIR_DEC (1<<9)
92 #define CCR_MSEL_B (1<<8)
93 #define CCR_DSIZ_32 (0x0 << 6)
94 #define CCR_DSIZ_8 (0x1 << 6)
95 #define CCR_DSIZ_16 (0x2 << 6)
96 #define CCR_SSIZ_32 (0x0 << 4)
97 #define CCR_SSIZ_8 (0x1 << 4)
98 #define CCR_SSIZ_16 (0x2 << 4)
99 #define CCR_REN (1<<3)
100 #define CCR_RPT (1<<2)
101 #define CCR_FRC (1<<1)
102 #define CCR_CEN (1<<0)
103 #define RTOR_EN (1<<15)
104 #define RTOR_CLK (1<<14)
105 #define RTOR_PSC (1<<13)
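
/*
 * Illustrative sketch (added, not from the original source): the CCR value
 * programmed for a 16-bit device-to-memory transfer is composed from the
 * fields above. The destination fields sit two bits above the matching
 * source fields, which is why imxdma_control() shifts the memory-side
 * IMX_DMA_* helpers left by two:
 *
 *	u32 ccr = CCR_SMOD_FIFO | CCR_SSIZ_16 |   source: peripheral FIFO
 *		  CCR_DMOD_LINEAR | CCR_DSIZ_32 | dest: linear memory
 *		  CCR_REN;                        hardware request enable
 *
 * This equals (IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO) |
 * ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | CCR_REN, i.e. the
 * ccr_from_device value built under DMA_SLAVE_CONFIG below.
 */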
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
};
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_clk;
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	if (cpu_is_mx27())
		return imxdmac->hw_chaining;
	else
		return 0;
}
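
/*
 * Added note (an assumption based on how hw_chaining is used below): on
 * i.MX27 the controller can auto-repeat a transfer (CCR_RPT armed via
 * CCR_ACRPT), so the next scatter-gather segment can be chained in hardware
 * while the current one still runs. The watchdog timer armed in
 * dma_irq_handle_channel() guards against a chained segment that never
 * completes; imxdma_watchdog() then stops the channel and reports the
 * failure through the tasklet.
 */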
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg->length);
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}
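
/*
 * Added note: d->len is a running byte count for sg transfers. Cyclic
 * descriptors set it to the IMX_DMA_LENGTH_LOOP sentinel instead (see
 * imxdma_prep_dma_cyclic()), so the check above never decrements it and
 * the transfer keeps cycling through the sg ring.
 */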
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	pr_debug("imxdma%d: watchdog timeout!\n", imxdmac->channel);
}
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdmac->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdmac->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
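
/*
 * Added note (inferred from the probe code below): on i.MX1 the controller
 * has one shared interrupt line for all channels plus a separate error
 * line, so dma_irq_handler() and imxdma_err_handler() are installed as two
 * handlers. On i.MX21/27 each channel has its own interrupt and no separate
 * error line, so every per-channel IRQ runs this handler, which polls the
 * error status registers first and then dispatches completed channels.
 */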
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdmac->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	dma_cookie_complete(&desc->desc);

	/* If we are dealing with a cyclic descriptor keep it on ld_active */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdmac->lock);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdmac->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdmac->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		default:
			return -EINVAL;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				 imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdmac->lock, flags);

	return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdmac->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
		__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdmac->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdmac->lock, flags);
}
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	} else if (cpu_is_mx21()) {
		imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	} else if (cpu_is_mx27()) {
		imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	} else {
		kfree(imxdma);
		return -ENODEV;
	}

	imxdma->dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(imxdma->dma_clk)) {
		ret = PTR_ERR(imxdma->dma_clk);
		kfree(imxdma);
		return ret;
	}
	clk_enable(imxdma->dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			kfree(imxdma);
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			pr_crit("Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			kfree(imxdma);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					  dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				pr_crit("Can't register IRQ %d for DMA channel %d\n",
					MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);
	return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	kfree(imxdma);

	return 0;
}
static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};
static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");