/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 *   This driver instructs the DMA controller to issue the PCI Read Multiple
 *   command for PCI read operations, instead of using the default PCI Read
 *   Line command. Please be aware that this setting may result in read
 *   pre-fetching on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>

static void dma_init(struct fsldma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsldma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

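/*
 * Illustrative note (added, not from the original source): on the 85xx
 * parts the upper 32 bits of the 64-bit source/destination descriptor
 * fields carry transfer attributes and the lower 32 bits the address.
 * A snooped read from address 0x1000, for example, is encoded as
 * ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) | 0x1000.
 */
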
static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *fsl_chan)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *fsl_chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(fsl_chan))
			break;
		udelay(10);
	}

	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsldma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor physical address and
	 * Enable End-of-segment interrupt for
	 * the last link descriptor.
	 * (the previous node's next link descriptor)
	 *
	 * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 for disable loop
 *
 * Set the source address hold transfer size. The source address hold
 * or loop transfer size controls how the DMA reads from the source
 * address (SA): if the loop size is 4, the DMA will read data from
 * SA, SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1 ... and
 * so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 for disable loop
 *
 * Set the destination address hold transfer size. The destination
 * address hold or loop transfer size controls how the DMA writes to the
 * destination address (TA): if the loop size is 4, the DMA will write
 * data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, TA + 1 ...
 * and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

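/*
 * Worked example (added for illustration): the loop size is stored as a
 * power-of-two exponent, so only sizes 1, 2, 4 and 8 are meaningful.
 * With size == 4, __ilog2(4) == 2, so the source setter ORs in
 * FSL_DMA_MR_SAHE | (2 << 14) and the destination setter
 * FSL_DMA_MR_DAHE | (2 << 16).
 */
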
/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @fsl_chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
}

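/*
 * Worked example (added for illustration): the request count is likewise
 * stored as a power-of-two exponent, masked into bits 27:24 of the mode
 * register. With size == 64, __ilog2(64) == 6, so the code above ORs in
 * (6 << 24) & 0x0f000000 == 0x06000000.
 */
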
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If the external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately. The DMA channel will wait for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	desc->async_tx.cookie = cookie;

	fsl_chan->common.cookie = cookie;
	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

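/*
 * Illustrative client flow (hypothetical, not part of this driver): a
 * descriptor returned by one of the prep functions below is submitted
 * through this tx_submit hook and the transfer is then kicked off:
 *
 *	dma_cookie_t cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */
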
/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* We need the descriptor to be aligned to 32 bytes
	 * for meeting FSL DMA specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(fsl_chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}

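/*
 * Hypothetical usage sketch (dst, src and len are assumed to be already
 * DMA-mapped addresses/length supplied by the client): the function
 * above splits any length larger than FSL_DMA_BCR_MAX_CNT across
 * multiple link descriptors automatically:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	if (tx)
 *		tx->tx_submit(tx);
 */
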
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	struct list_head *tx_list;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!chan)
		return NULL;

	if (!chan->private)
		return NULL;

	fsl_chan = to_fsl_chan(chan);
	slave = chan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(fsl_chan);
			if (!new) {
				dev_err(fsl_chan->dev, "No free memory for "
						       "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(fsl_chan, &new->hw, copy);
			set_desc_src(fsl_chan, &new->hw, dma_src);
			set_desc_dest(fsl_chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(fsl_chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(fsl_chan, new);

	/* Enable extra controller features */
	if (fsl_chan->set_src_loop_size)
		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);

	if (fsl_chan->set_dest_loop_size)
		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);

	if (fsl_chan->toggle_ext_start)
		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);

	if (fsl_chan->toggle_ext_pause)
		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);

	if (fsl_chan->set_request_count)
		fsl_chan->set_request_count(fsl_chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	tx_list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
		list_del_init(&new->node);
		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}

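/*
 * Hypothetical client sketch (illustrative only; field names follow
 * struct fsl_dma_slave as used above, everything else is assumed): the
 * slave configuration travels in chan->private before the prep call:
 *
 *	struct fsl_dma_slave slave;
 *	struct fsl_dma_hw_addr addr;
 *
 *	INIT_LIST_HEAD(&slave.addresses);
 *	addr.address = fifo_phys;
 *	addr.length = fifo_len;
 *	list_add_tail(&addr.entry, &slave.addresses);
 *	slave.request_count = 64;
 *
 *	chan->private = &slave;
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						DMA_TO_DEVICE, 0);
 */
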
static void fsl_dma_device_terminate_all(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan;
	struct fsl_desc_sw *desc, *tmp;
	unsigned long flags;

	if (!chan)
		return;

	fsl_chan = to_fsl_chan(chan);

	/* Halt the DMA engine */
	dma_halt(fsl_chan);

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
		list_del(&desc->node);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: completed
 * link descriptors are removed, their callbacks run, and the memory
 * returned to the descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	if (!dma_is_idle(fsl_chan))
		goto out_unlock;

	dma_halt(fsl_chan);

	/* If there are some link descriptors not yet transferred
	 * in the queue, we need to start them.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

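/*
 * Illustrative polling sketch (hypothetical client code, not part of
 * this driver): this hook is normally reached through the dmaengine
 * wrapper, e.g.:
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)
 *		cpu_relax();
 */
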
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsldma_chan *fsl_chan = data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(fsl_chan),
			(unsigned long long)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

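/*
 * Worked example (added for illustration): each channel owns one byte of
 * the 32-bit global status register, channel 0 in the most significant
 * byte. If only channel 0 raised an event, gsr might read 0x80000000;
 * then ffs(gsr) == 32 and ch_nr = (32 - 32) / 8 = 0.
 */
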
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *new_fsl_chan;
	struct resource res;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different than the feature
	 * of its channels, report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(res.start, resource_size(&res));
	new_fsl_chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
			compatible,
			new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}

static int __devinit fsldma_of_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsldma_device *fdev;
	struct device_node *child;
	struct resource res;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &res);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at 0x%llx...\n",
			match->compatible, (unsigned long long)res.start);
	fdev->reg_base = ioremap(res.start, resource_size(&res));

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}

static int fsldma_of_remove(struct of_device *of_dev)
{
	struct fsldma_device *fdev;
	int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}

static struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name		= "fsl-elo-dma",
	.match_table	= fsldma_of_ids,
	.probe		= fsldma_of_probe,
	.remove		= fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");