/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		int _dms = __slave ? __slave->dst_master : 0;		\
		int _sms = __slave ? __slave->src_master : 1;		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :		\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :		\
			DW_DMA_MSIZE_16;				\
									\
		(DWC_CTLL_DST_MSIZE(_dmsize)				\
		 | DWC_CTLL_SRC_MSIZE(_smsize)				\
		 | DWC_CTLL_LLP_D_EN					\
		 | DWC_CTLL_LLP_S_EN					\
		 | DWC_CTLL_DMS(_dms)					\
		 | DWC_CTLL_SMS(_sms));					\
	})

/*
 * The maximum block transfer count is configuration-dependent and
 * usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
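
/*
 * Return the first descriptor on the channel's free list that the
 * client has already ACKed, or NULL if none is available yet.
 */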
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
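
/*
 * Program the channel's CFG_LO/CFG_HI registers (using controller-specific
 * slave data if the client supplied any) and unmask its XFER and ERROR
 * interrupts. Done once per channel until its resources are freed.
 */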
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/
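
/*
 * Despite the "fls" in the name, this returns the number of trailing
 * zero bits in v (capped at 3), i.e. the largest power-of-2 transfer
 * width (as a log2 byte count) that the given addresses and length are
 * all aligned to.
 */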
static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}

static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/
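
/*
 * Hand a finished descriptor back: complete its cookie, ACK it and its
 * children, unmap the buffers for non-slave (memcpy) clients, return
 * everything to the free list, and finally run the client callback if
 * requested.
 */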
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
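
/*
 * Walk the active list and compare the channel's current LLP register
 * against each descriptor's own address and llp pointer: the first
 * match is the block still in flight, and everything submitted before
 * it must therefore be complete.
 */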
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check the descriptor's own address first */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* then check the descriptor's llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%llx d0x%llx l0x%llx c0x%x:%x\n",
			(unsigned long long)lli->sar,
			(unsigned long long)lli->dar,
			(unsigned long long)lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called from the tasklet with DMAC interrupts masked; takes dwc->lock itself */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */
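
/*
 * Tasklet: dispatch each channel according to the raw status latched
 * when the interrupt fired, then re-enable the interrupts that the
 * hardirq handler masked.
 */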
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
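
/*
 * dmaengine ->tx_submit() hook: assign a cookie and either start the
 * descriptor immediately (channel idle) or park it on the queue for
 * the tasklet to pick up later.
 */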
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	src_width = dst_width = dwc_fast_fls(src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
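
/*
 * Build an LLI chain for a slave scatterlist. Each scatterlist entry
 * may be split into several blocks so that no block exceeds
 * DWC_MAX_COUNT transfers; the register side uses the width from the
 * client's dma_slave_config, the memory side the widest alignment the
 * address and length allow.
 */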
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This is done by finding the position of the most significant set bit
 * with fls() and subtracting 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
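
/*
 * dmaengine ->device_control() hook: handles DMA_PAUSE, DMA_RESUME,
 * DMA_TERMINATE_ALL and DMA_SLAVE_CONFIG. Pause waits for the channel
 * FIFO to drain before reporting the channel suspended.
 */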
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
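
/*
 * Allocate and map up to NR_DESCS_PER_CHANNEL descriptors for the
 * channel; the lock is dropped around each kzalloc() since allocation
 * may sleep. Returns the number of descriptors on the free list.
 */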
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */
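
/*
 * A minimal usage sketch (assuming the client has already configured
 * the slave address and width via the DMA_SLAVE_CONFIG control
 * command; my_period_cb and my_data are hypothetical client names):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_cb;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */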

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make the list cyclic */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
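
/*
 * Platform probe: map the registers, enable the "hclk" clock, reset the
 * controller, set up one dma_chan per channel described in the platform
 * data, and finally register with the dmaengine core.
 */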
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_prepare_enable(dw->clk);

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");