/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include "dmaengine.h"
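/*
 * Controller register offsets. Their definitions are missing from this
 * excerpt; the values below are an assumption, reconstructed from the
 * PXA/MMP peripheral-DMA register layout (per-channel DCSR at the base,
 * a shared DINT, and per-channel descriptor registers from 0x0200).
 */
#define DCSR		0x0000	/* assumed: per-channel ctrl/status, idx << 2 */
#define DINT		0x00f0	/* assumed: shared interrupt status */
#define DDADR		0x0200	/* assumed: descriptor address, idx << 4 */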
#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* End of Receive interrupt */
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	0x1000
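/*
 * Illustrative only (not used by the driver): the DCMD fields above are
 * OR'ed together per descriptor; e.g. a 4-byte-wide, 32-byte-burst
 * memory-to-device transfer of len bytes would compose
 *
 *	dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 | DCMD_BURST32 |
 *	       (DCMD_LENGTH & len);
 *
 * which mirrors what mmp_pdma_control() and the prep callbacks build below.
 */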
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);
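/*
 * Hardware walks descriptors through ddadr: each descriptor holds the bus
 * address of the next one (low bits masked by DDADR_DESCADDR), and the
 * last one carries DDADR_STOP. Sketch of a two-descriptor chain (the
 * addresses are placeholders; the driver uses async_tx.phys from the
 * dma pool):
 *
 *	d0->ddadr = d1_phys;
 *	d1->ddadr = DDADR_STOP;
 *	d1->dcmd |= DCMD_ENDIRQEN;	interrupt at end of chain
 */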
struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};
struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */

	struct dma_pool *desc_pool;	/* Descriptors pool */
};
struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};
struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
};
#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy->vchan)
		return;

	/* route the peripheral request line to this physical channel */
	reg = phy->vchan->drcmr;
	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (phy) {
		reg = (phy->idx << 2) + DCSR;
		writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
	}
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq by writing the status bits back */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	/* demux the shared interrupt over all asserted channels */
	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy;

	/*
	 * dma channel priorities (e.g. channel 18 maps to priority 0)
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				return phy;
			}
		}
	}

	return NULL;
}
/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
{
	struct mmp_pdma_desc_sw *tail =
		to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))
		goto out_splice;

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}
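/*
 * Because append_pending_queue() rewrites the old tail's ddadr and clears
 * its DCMD_ENDIRQEN, a queue of several submitted transactions raises a
 * single end-of-chain interrupt rather than one per transaction.
 */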
/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		if (chan->phy) {
			chan->phy->vchan = NULL;
			chan->phy = NULL;
		}
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* assign a cookie to each descriptor in the chain */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}
/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	if (chan->phy) {
		chan->phy->vchan = NULL;
		chan->phy = NULL;
	}
	chan->idle = true;
	chan->dev_addr = 0;

	return 1;
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	if (chan->phy) {
		chan->phy->vchan = NULL;
		chan->phy = NULL;
	}
}
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
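/*
 * Illustrative client use of the memcpy path (a sketch against the generic
 * dmaengine API, not part of this driver; dst, src and len are placeholders
 * and error handling is omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = dmaengine_submit(tx);		calls mmp_pdma_tx_submit()
 *	dma_async_issue_pending(chan);		calls mmp_pdma_issue_pending()
 */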
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		if (chan->phy) {
			chan->phy->vchan = NULL;
			chan->phy = NULL;
		}
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->drcmr = cfg->slave_id;
		chan->dev_addr = addr;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
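/*
 * Illustrative client-side configuration for the DMA_SLAVE_CONFIG case
 * above (generic dmaengine API; the FIFO address and request line are
 * placeholders):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 32,
 *		.slave_id	= drcmr_request_line,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * This yields dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH4 |
 * DCMD_BURST32 and routes the request line via chan->drcmr in enable_chan().
 */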
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	ret = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return ret;
}
/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
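/*
 * The submit/issue split seen by clients (sketch; the prep arguments are
 * placeholders):
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, 0);
 *	cookie = dmaengine_submit(tx);	  queues onto chain_pending
 *	dma_async_issue_pending(chan);	  start_pending_queue() kicks the HW
 */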
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {
		dma_cookie_t cookie;

		/* the last completed descriptor carries the chain's cookie */
		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}
static int __devexit mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);

	return 0;
}
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
			      int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED,
				       "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}
static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
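/*
 * Example device tree node (a sketch: only the "marvell,pdma-1.0"
 * compatible and the "#dma-channels" property come from this driver;
 * the unit address, reg and interrupts values are hypothetical):
 *
 *	pdma: dma-controller@d4000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0xd4000000 0x10000>;
 *		interrupts = <47>;
 *		#dma-channels = <16>;
 *	};
 */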
static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;
	pdev->dev = &op->dev;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	pdev->base = devm_request_and_ioremap(pdev->dev, iores);
	if (!pdev->base)
		return -EADDRNOTAVAIL;

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED,
				       "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	platform_set_drvdata(op, pdev);

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(pdev->device.dev, "initialized\n");
	return 0;
}
static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};
static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");