1 /*
2 * Renesas SuperH DMA Engine support
3 *
4 * base is drivers/dma/fsldma.c
5 *
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
9 *
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * - The SuperH DMAC does not have a hardware DMA chain mode.
16 * - The maximum DMA transfer size is 16MB.
17 *
18 */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/dmaengine.h>
24 #include <linux/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/platform_device.h>
27 #include <cpu/dma.h>
28 #include <asm/dma-sh.h>
29 #include "shdma.h"
30
31 /* DMA descriptor control */
32 enum sh_dmae_desc_status {
33 DESC_IDLE,
34 DESC_PREPARED,
35 DESC_SUBMITTED,
36 DESC_COMPLETED, /* completed, have to call callback */
37 DESC_WAITING, /* callback called, waiting for ack / re-submit */
38 };
39
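/* Number of link descriptors pre-allocated per channel in
 * sh_dmae_alloc_chan_resources(); a prepare call that needs more free
 * descriptors than are available fails until completed ones are recycled. */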
40 #define NR_DESCS_PER_CHANNEL 32
41 /*
42 * Define the default configuration for dual address memory-memory transfer.
43 * The 0x400 value represents auto-request, external->external.
44 *
45 * The driver defaults to 4-byte burst mode.
46 * To change the mode, change the value of RS_DEFAULT,
47 * e.g. for 1-byte burst mode use (RS_DUAL & ~TS_32).
48 */
49 #define RS_DEFAULT (RS_DUAL)
50
51 /* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
52 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
53
54 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
55
56 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
57 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
58 {
59 ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
60 }
61
62 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
63 {
64 return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
65 }
66
67 /*
68 * Reset DMA controller
69 *
70 * SH7780 has two DMAOR registers
71 */
72 static void sh_dmae_ctl_stop(int id)
73 {
74 unsigned short dmaor = dmaor_read_reg(id);
75
76 dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
77 dmaor_write_reg(id, dmaor);
78 }
79
80 static int sh_dmae_rst(int id)
81 {
82 unsigned short dmaor;
83
84 sh_dmae_ctl_stop(id);
85 dmaor = dmaor_read_reg(id) | DMAOR_INIT;
86
87 dmaor_write_reg(id, dmaor);
88 if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
89 pr_err("dma-sh: Can't initialize DMAOR.\n");
90 return -EINVAL;
91 }
92 return 0;
93 }
94
95 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
96 {
97 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
98
99 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
100 return true; /* working */
101
102 return false; /* waiting */
103 }
104
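/*
 * The CHCR transfer-size (TS) field is split across two bit groups; the low
 * and high parts are combined into an index into ts_shift[], which gives
 * log2 of the transfer unit size in bytes.
 */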
105 static unsigned int ts_shift[] = TS_SHIFT;
106 static inline unsigned int calc_xmit_shift(u32 chcr)
107 {
108 int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
109 ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
110
111 return ts_shift[cnt];
112 }
113
114 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
115 {
116 sh_dmae_writel(sh_chan, hw->sar, SAR);
117 sh_dmae_writel(sh_chan, hw->dar, DAR);
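	/* TCR counts transfer units, not bytes, hence the shift by xmit_shift */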
118 sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
119 }
120
121 static void dmae_start(struct sh_dmae_chan *sh_chan)
122 {
123 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
124
125 chcr |= CHCR_DE | CHCR_IE;
126 sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
127 }
128
129 static void dmae_halt(struct sh_dmae_chan *sh_chan)
130 {
131 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
132
133 chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
134 sh_dmae_writel(sh_chan, chcr, CHCR);
135 }
136
137 static void dmae_init(struct sh_dmae_chan *sh_chan)
138 {
139 u32 chcr = RS_DEFAULT; /* default is DUAL mode */
140 sh_chan->xmit_shift = calc_xmit_shift(chcr);
141 sh_dmae_writel(sh_chan, chcr, CHCR);
142 }
143
144 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
145 {
146 /* CHCR cannot be written while the channel is busy */
147 if (dmae_is_busy(sh_chan))
148 return -EBUSY;
149
150 sh_chan->xmit_shift = calc_xmit_shift(val);
151 sh_dmae_writel(sh_chan, val, CHCR);
152
153 return 0;
154 }
155
156 #define DMARS_SHIFT 8
157 #define DMARS_CHAN_MSK 0x01
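/*
 * Each 16-bit DMARS register holds the request IDs (MID/RID) of two channels:
 * the even-numbered channel in the low byte and the odd-numbered one in the
 * high byte, hence the DMARS_SHIFT for odd channel IDs.
 */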
158 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
159 {
160 u32 addr;
161 int shift = 0;
162
163 if (dmae_is_busy(sh_chan))
164 return -EBUSY;
165
166 if (sh_chan->id & DMARS_CHAN_MSK)
167 shift = DMARS_SHIFT;
168
169 if (sh_chan->id < 6)
170 /* DMA0RS0 - DMA0RS2 */
171 addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
172 #ifdef SH_DMARS_BASE1
173 else if (sh_chan->id < 12)
174 /* DMA1RS0 - DMA1RS2 */
175 addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
176 #endif
177 else
178 return -EINVAL;
179
180 ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
181
182 return 0;
183 }
184
185 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
186 {
187 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
188 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
189 dma_async_tx_callback callback = tx->callback;
190 dma_cookie_t cookie;
191
192 spin_lock_bh(&sh_chan->desc_lock);
193
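	/*
	 * Cookies are signed and must stay positive: negative values are
	 * reserved for error states (e.g. -EBUSY, -EINVAL), so wrap back
	 * to 1 on overflow.
	 */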
194 cookie = sh_chan->common.cookie;
195 cookie++;
196 if (cookie < 0)
197 cookie = 1;
198
199 sh_chan->common.cookie = cookie;
200 tx->cookie = cookie;
201
202 /* Mark all chunks of this descriptor as submitted, move to the queue */
203 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
204 /*
205 * All chunks are on the global ld_free, so, we have to find
206 * the end of the chain ourselves
207 */
208 if (chunk != desc && (chunk->mark == DESC_IDLE ||
209 chunk->async_tx.cookie > 0 ||
210 chunk->async_tx.cookie == -EBUSY ||
211 &chunk->node == &sh_chan->ld_free))
212 break;
213 chunk->mark = DESC_SUBMITTED;
214 /* Callback goes to the last chunk */
215 chunk->async_tx.callback = NULL;
216 chunk->cookie = cookie;
217 list_move_tail(&chunk->node, &sh_chan->ld_queue);
218 last = chunk;
219 }
220
221 last->async_tx.callback = callback;
222 last->async_tx.callback_param = tx->callback_param;
223
224 dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
225 tx->cookie, &last->async_tx, sh_chan->id,
226 desc->hw.sar, desc->hw.tcr, desc->hw.dar);
227
228 spin_unlock_bh(&sh_chan->desc_lock);
229
230 return cookie;
231 }
232
233 /* Called with desc_lock held */
234 static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
235 {
236 struct sh_desc *desc;
237
238 list_for_each_entry(desc, &sh_chan->ld_free, node)
239 if (desc->mark != DESC_PREPARED) {
240 BUG_ON(desc->mark != DESC_IDLE);
241 list_del(&desc->node);
242 return desc;
243 }
244
245 return NULL;
246 }
247
248 static struct sh_dmae_slave_config *sh_dmae_find_slave(
249 struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
250 {
251 struct dma_device *dma_dev = sh_chan->common.device;
252 struct sh_dmae_device *shdev = container_of(dma_dev,
253 struct sh_dmae_device, common);
254 struct sh_dmae_pdata *pdata = &shdev->pdata;
255 int i;
256
257 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
258 return NULL;
259
260 for (i = 0; i < pdata->config_num; i++)
261 if (pdata->config[i].slave_id == slave_id)
262 return pdata->config + i;
263
264 return NULL;
265 }
266
267 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
268 {
269 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
270 struct sh_desc *desc;
271 struct sh_dmae_slave *param = chan->private;
272
273 /*
274 * This relies on the guarantee from dmaengine that alloc_chan_resources
275 * never runs concurrently with itself or free_chan_resources.
276 */
277 if (param) {
278 struct sh_dmae_slave_config *cfg;
279
280 cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
281 if (!cfg)
282 return -EINVAL;
283
284 if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
285 return -EBUSY;
286
287 param->config = cfg;
288
289 dmae_set_dmars(sh_chan, cfg->mid_rid);
290 dmae_set_chcr(sh_chan, cfg->chcr);
291 } else {
292 if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
293 dmae_set_chcr(sh_chan, RS_DEFAULT);
294 }
295
296 spin_lock_bh(&sh_chan->desc_lock);
297 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
298 spin_unlock_bh(&sh_chan->desc_lock);
299 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
300 if (!desc) {
301 spin_lock_bh(&sh_chan->desc_lock);
302 break;
303 }
304 dma_async_tx_descriptor_init(&desc->async_tx,
305 &sh_chan->common);
306 desc->async_tx.tx_submit = sh_dmae_tx_submit;
307 desc->mark = DESC_IDLE;
308
309 spin_lock_bh(&sh_chan->desc_lock);
310 list_add(&desc->node, &sh_chan->ld_free);
311 sh_chan->descs_allocated++;
312 }
313 spin_unlock_bh(&sh_chan->desc_lock);
314
315 return sh_chan->descs_allocated;
316 }
317
318 /*
319 * sh_dma_free_chan_resources - Free all resources of the channel.
320 */
321 static void sh_dmae_free_chan_resources(struct dma_chan *chan)
322 {
323 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
324 struct sh_desc *desc, *_desc;
325 LIST_HEAD(list);
326
327 dmae_halt(sh_chan);
328
329 /* Prepared and not submitted descriptors can still be on the queue */
330 if (!list_empty(&sh_chan->ld_queue))
331 sh_dmae_chan_ld_cleanup(sh_chan, true);
332
333 if (chan->private) {
334 /* The caller is holding dma_list_mutex */
335 struct sh_dmae_slave *param = chan->private;
336 clear_bit(param->slave_id, sh_dmae_slave_used);
337 }
338
339 spin_lock_bh(&sh_chan->desc_lock);
340
341 list_splice_init(&sh_chan->ld_free, &list);
342 sh_chan->descs_allocated = 0;
343
344 spin_unlock_bh(&sh_chan->desc_lock);
345
346 list_for_each_entry_safe(desc, _desc, &list, node)
347 kfree(desc);
348 }
349
350 /**
351 * sh_dmae_add_desc - get, set up and return one transfer descriptor
352 * @sh_chan: DMA channel
353 * @flags: DMA transfer flags
354 * @dest: destination DMA address, incremented when direction equals
355 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
356 * @src: source DMA address, incremented when direction equals
357 * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
358 * @len: DMA transfer length
359 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
360 * @direction: needed for slave DMA to decide which address to keep constant,
361 * equals DMA_BIDIRECTIONAL for MEMCPY
362 * Returns 0 or an error
363 * Locks: called with desc_lock held
364 */
365 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
366 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
367 struct sh_desc **first, enum dma_data_direction direction)
368 {
369 struct sh_desc *new;
370 size_t copy_size;
371
372 if (!*len)
373 return NULL;
374
375 /* Allocate the link descriptor from the free list */
376 new = sh_dmae_get_desc(sh_chan);
377 if (!new) {
378 dev_err(sh_chan->dev, "No free link descriptor available\n");
379 return NULL;
380 }
381
382 copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
383
384 new->hw.sar = *src;
385 new->hw.dar = *dest;
386 new->hw.tcr = copy_size;
387
388 if (!*first) {
389 /* First desc */
390 new->async_tx.cookie = -EBUSY;
391 *first = new;
392 } else {
393 /* Other desc - invisible to the user */
394 new->async_tx.cookie = -EINVAL;
395 }
396
397 dev_dbg(sh_chan->dev,
398 "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
399 copy_size, *len, *src, *dest, &new->async_tx,
400 new->async_tx.cookie, sh_chan->xmit_shift);
401
402 new->mark = DESC_PREPARED;
403 new->async_tx.flags = flags;
404 new->direction = direction;
405
406 *len -= copy_size;
407 if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
408 *src += copy_size;
409 if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
410 *dest += copy_size;
411
412 return new;
413 }
414
415 /*
416 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
417 *
418 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
419 * converted to scatter-gather to guarantee consistent locking and a correct
420 * list manipulation. For slave DMA, direction carries the usual meaning and,
421 * logically, the SG list is RAM while the addr variable contains the slave
422 * address, e.g., a FIFO I/O register. For MEMCPY, direction equals
423 * DMA_BIDIRECTIONAL and the single-element SG list points at the source buffer.
424 */
425 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
426 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
427 enum dma_data_direction direction, unsigned long flags)
428 {
429 struct scatterlist *sg;
430 struct sh_desc *first = NULL, *new = NULL /* compiler... */;
431 LIST_HEAD(tx_list);
432 int chunks = 0;
433 int i;
434
435 if (!sg_len)
436 return NULL;
437
438 for_each_sg(sgl, sg, sg_len, i)
439 chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
440 (SH_DMA_TCR_MAX + 1);
441
442 /* Have to lock the whole loop to protect against concurrent release */
443 spin_lock_bh(&sh_chan->desc_lock);
444
445 /*
446 * Chaining:
447 * the first descriptor is what the user deals with in all API calls; its
448 * cookie is initially set to -EBUSY and changes to a positive number
449 * at tx-submit time;
450 * if more than one chunk is needed, further chunks have cookie = -EINVAL;
451 * the last chunk, if not equal to the first, has cookie = -ENOSPC;
452 * all chunks are linked onto the tx_list head with their .node heads
453 * only during this function, then they are immediately spliced
454 * back onto the free list in the form of a chain
455 */
456 for_each_sg(sgl, sg, sg_len, i) {
457 dma_addr_t sg_addr = sg_dma_address(sg);
458 size_t len = sg_dma_len(sg);
459
460 if (!len)
461 goto err_get_desc;
462
463 do {
464 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
465 i, sg, len, (unsigned long long)sg_addr);
466
467 if (direction == DMA_FROM_DEVICE)
468 new = sh_dmae_add_desc(sh_chan, flags,
469 &sg_addr, addr, &len, &first,
470 direction);
471 else
472 new = sh_dmae_add_desc(sh_chan, flags,
473 addr, &sg_addr, &len, &first,
474 direction);
475 if (!new)
476 goto err_get_desc;
477
478 new->chunks = chunks--;
479 list_add_tail(&new->node, &tx_list);
480 } while (len);
481 }
482
483 if (new != first)
484 new->async_tx.cookie = -ENOSPC;
485
486 /* Put them back on the free list, so they don't get lost */
487 list_splice_tail(&tx_list, &sh_chan->ld_free);
488
489 spin_unlock_bh(&sh_chan->desc_lock);
490
491 return &first->async_tx;
492
493 err_get_desc:
494 list_for_each_entry(new, &tx_list, node)
495 new->mark = DESC_IDLE;
496 list_splice(&tx_list, &sh_chan->ld_free);
497
498 spin_unlock_bh(&sh_chan->desc_lock);
499
500 return NULL;
501 }
502
503 static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
504 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
505 size_t len, unsigned long flags)
506 {
507 struct sh_dmae_chan *sh_chan;
508 struct scatterlist sg;
509
510 if (!chan || !len)
511 return NULL;
512
513 chan->private = NULL;
514
515 sh_chan = to_sh_chan(chan);
516
517 sg_init_table(&sg, 1);
518 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
519 offset_in_page(dma_src));
520 sg_dma_address(&sg) = dma_src;
521 sg_dma_len(&sg) = len;
522
523 return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
524 flags);
525 }
526
527 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
528 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
529 enum dma_data_direction direction, unsigned long flags)
530 {
531 struct sh_dmae_slave *param;
532 struct sh_dmae_chan *sh_chan;
533
534 if (!chan)
535 return NULL;
536
537 sh_chan = to_sh_chan(chan);
538 param = chan->private;
539
540 /* Someone calling slave DMA on a public channel? */
541 if (!param || !sg_len) {
542 dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
543 __func__, param, sg_len, param ? param->slave_id : -1);
544 return NULL;
545 }
546
547 /*
548 * if (param != NULL), this is a successfully requested slave channel,
549 * therefore param->config != NULL too.
550 */
551 return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
552 direction, flags);
553 }
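/*
 * Illustrative only -- not part of this driver: a typical slave-DMA client
 * requests a channel through the generic dmaengine API and hands over a
 * struct sh_dmae_slave, which alloc_chan_resources() above uses to look up
 * the slave configuration. The filter callback and the slave_id value below
 * are hypothetical placeholders for board/client code.
 *
 *	static bool shdma_chan_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	// attach the slave descriptor
 *		return true;
 *	}
 *
 *	static struct dma_chan *request_tx_chan(void)
 *	{
 *		static struct sh_dmae_slave slave = {
 *			.slave_id = SHDMA_SLAVE_SCIF0_TX,	// example ID
 *		};
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *		return dma_request_channel(mask, shdma_chan_filter, &slave);
 *	}
 *
 * The returned channel is then used with device_prep_slave_sg(), tx_submit()
 * and dma_async_issue_pending() as with any other dmaengine slave channel.
 */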
554
555 static void sh_dmae_terminate_all(struct dma_chan *chan)
556 {
557 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
558
559 if (!chan)
560 return;
561
562 sh_dmae_chan_ld_cleanup(sh_chan, true);
563 }
564
565 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
566 {
567 struct sh_desc *desc, *_desc;
568 /* Is the "exposed" head of a chain acked? */
569 bool head_acked = false;
570 dma_cookie_t cookie = 0;
571 dma_async_tx_callback callback = NULL;
572 void *param = NULL;
573
574 spin_lock_bh(&sh_chan->desc_lock);
575 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
576 struct dma_async_tx_descriptor *tx = &desc->async_tx;
577
578 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
579 BUG_ON(desc->mark != DESC_SUBMITTED &&
580 desc->mark != DESC_COMPLETED &&
581 desc->mark != DESC_WAITING);
582
583 /*
584 * queue is ordered, and we use this loop to (1) clean up all
585 * completed descriptors, and to (2) update descriptor flags of
586 * any chunks in a (partially) completed chain
587 */
588 if (!all && desc->mark == DESC_SUBMITTED &&
589 desc->cookie != cookie)
590 break;
591
592 if (tx->cookie > 0)
593 cookie = tx->cookie;
594
595 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
596 if (sh_chan->completed_cookie != desc->cookie - 1)
597 dev_dbg(sh_chan->dev,
598 "Completing cookie %d, expected %d\n",
599 desc->cookie,
600 sh_chan->completed_cookie + 1);
601 sh_chan->completed_cookie = desc->cookie;
602 }
603
604 /* Call callback on the last chunk */
605 if (desc->mark == DESC_COMPLETED && tx->callback) {
606 desc->mark = DESC_WAITING;
607 callback = tx->callback;
608 param = tx->callback_param;
609 dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
610 tx->cookie, tx, sh_chan->id);
611 BUG_ON(desc->chunks != 1);
612 break;
613 }
614
615 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
616 if (desc->mark == DESC_COMPLETED) {
617 BUG_ON(tx->cookie < 0);
618 desc->mark = DESC_WAITING;
619 }
620 head_acked = async_tx_test_ack(tx);
621 } else {
622 switch (desc->mark) {
623 case DESC_COMPLETED:
624 desc->mark = DESC_WAITING;
625 /* Fall through */
626 case DESC_WAITING:
627 if (head_acked)
628 async_tx_ack(&desc->async_tx);
629 }
630 }
631
632 dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
633 tx, tx->cookie);
634
635 if (((desc->mark == DESC_COMPLETED ||
636 desc->mark == DESC_WAITING) &&
637 async_tx_test_ack(&desc->async_tx)) || all) {
638 /* Remove from ld_queue list */
639 desc->mark = DESC_IDLE;
640 list_move(&desc->node, &sh_chan->ld_free);
641 }
642 }
643 spin_unlock_bh(&sh_chan->desc_lock);
644
645 if (callback)
646 callback(param);
647
648 return callback;
649 }
650
651 /*
652 * sh_chan_ld_cleanup - Clean up link descriptors
653 *
654 * This function cleans up the ld_queue of DMA channel.
655 */
656 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
657 {
658 while (__ld_cleanup(sh_chan, all))
659 ;
660 }
661
662 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
663 {
664 struct sh_desc *sd;
665
666 spin_lock_bh(&sh_chan->desc_lock);
667 /* DMA work check */
668 if (dmae_is_busy(sh_chan)) {
669 spin_unlock_bh(&sh_chan->desc_lock);
670 return;
671 }
672
673 /* Find the first descriptor that has not been transferred yet */
674 list_for_each_entry(sd, &sh_chan->ld_queue, node)
675 if (sd->mark == DESC_SUBMITTED) {
676 /* Get the ld start address from ld_queue */
677 dmae_set_reg(sh_chan, &sd->hw);
678 dmae_start(sh_chan);
679 break;
680 }
681
682 spin_unlock_bh(&sh_chan->desc_lock);
683 }
684
685 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
686 {
687 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
688 sh_chan_xfer_ld_queue(sh_chan);
689 }
690
691 static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
692 dma_cookie_t cookie,
693 dma_cookie_t *done,
694 dma_cookie_t *used)
695 {
696 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
697 dma_cookie_t last_used;
698 dma_cookie_t last_complete;
699
700 sh_dmae_chan_ld_cleanup(sh_chan, false);
701
702 last_used = chan->cookie;
703 last_complete = sh_chan->completed_cookie;
704 BUG_ON(last_complete < 0);
705
706 if (done)
707 *done = last_complete;
708
709 if (used)
710 *used = last_used;
711
712 return dma_async_is_complete(cookie, last_complete, last_used);
713 }
714
715 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
716 {
717 irqreturn_t ret = IRQ_NONE;
718 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
719 u32 chcr = sh_dmae_readl(sh_chan, CHCR);
720
721 if (chcr & CHCR_TE) {
722 /* DMA stop */
723 dmae_halt(sh_chan);
724
725 ret = IRQ_HANDLED;
726 tasklet_schedule(&sh_chan->tasklet);
727 }
728
729 return ret;
730 }
731
732 #if defined(CONFIG_CPU_SH4)
733 static irqreturn_t sh_dmae_err(int irq, void *data)
734 {
735 int err = 0;
736 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
737
738 /* Mixed IRQ mode */
739 if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
740 int __maybe_unused cnt = 0;
741 switch (irq) {
742 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
743 case DMTE6_IRQ:
744 cnt++;
745 #endif
746 case DMTE0_IRQ:
747 if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
748 disable_irq(irq);
749 return IRQ_HANDLED;
750 }
751 default:
752 return IRQ_NONE;
753 }
754 } else {
755 /* reset dma controller */
756 err = sh_dmae_rst(0);
757 if (err)
758 return err;
759 #ifdef SH_DMAC_BASE1
760 if (shdev->pdata.mode & SHDMA_DMAOR1) {
761 err = sh_dmae_rst(1);
762 if (err)
763 return err;
764 }
765 #endif
766 disable_irq(irq);
767 return IRQ_HANDLED;
768 }
769 }
770 #endif
771
772 static void dmae_do_tasklet(unsigned long data)
773 {
774 struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
775 struct sh_desc *desc;
776 u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
777 u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
778
779 spin_lock(&sh_chan->desc_lock);
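	/*
	 * Identify the chunk that has just completed by comparing the current
	 * SAR/DAR register values with each submitted descriptor's expected
	 * end address.
	 */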
780 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
781 if (desc->mark == DESC_SUBMITTED &&
782 ((desc->direction == DMA_FROM_DEVICE &&
783 (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
784 (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
785 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
786 desc->async_tx.cookie, &desc->async_tx,
787 desc->hw.dar);
788 desc->mark = DESC_COMPLETED;
789 break;
790 }
791 }
792 spin_unlock(&sh_chan->desc_lock);
793
794 /* Next desc */
795 sh_chan_xfer_ld_queue(sh_chan);
796 sh_dmae_chan_ld_cleanup(sh_chan, false);
797 }
798
799 static unsigned int get_dmae_irq(unsigned int id)
800 {
801 unsigned int irq = 0;
802 if (id < ARRAY_SIZE(dmte_irq_map))
803 irq = dmte_irq_map[id];
804 return irq;
805 }
806
807 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
808 {
809 int err;
810 unsigned int irq = get_dmae_irq(id);
811 unsigned long irqflags = IRQF_DISABLED;
812 struct sh_dmae_chan *new_sh_chan;
813
814 /* alloc channel */
815 new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
816 if (!new_sh_chan) {
817 dev_err(shdev->common.dev,
818 "No free memory for allocating dma channels!\n");
819 return -ENOMEM;
820 }
821
822 new_sh_chan->dev = shdev->common.dev;
823 new_sh_chan->id = id;
824
825 /* Init DMA tasklet */
826 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
827 (unsigned long)new_sh_chan);
828
829 /* Init the channel */
830 dmae_init(new_sh_chan);
831
832 spin_lock_init(&new_sh_chan->desc_lock);
833
834 /* Init descriptor management lists */
835 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
836 INIT_LIST_HEAD(&new_sh_chan->ld_free);
837
838 /* Point the channel at the shared struct dma_device */
839 new_sh_chan->common.device = &shdev->common;
840
841 /* Add the channel to DMA device channel list */
842 list_add_tail(&new_sh_chan->common.device_node,
843 &shdev->common.channels);
844 shdev->common.chancnt++;
845
846 if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
847 irqflags = IRQF_SHARED;
848 #if defined(DMTE6_IRQ)
849 if (irq >= DMTE6_IRQ)
850 irq = DMTE6_IRQ;
851 else
852 #endif
853 irq = DMTE0_IRQ;
854 }
855
856 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
857 "sh-dmae%d", new_sh_chan->id);
858
859 /* set up channel irq */
860 err = request_irq(irq, &sh_dmae_interrupt, irqflags,
861 new_sh_chan->dev_id, new_sh_chan);
862 if (err) {
863 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
864 "with return %d\n", id, err);
865 goto err_no_irq;
866 }
867
868 shdev->chan[id] = new_sh_chan;
869 return 0;
870
871 err_no_irq:
872 /* remove from dmaengine device node */
873 list_del(&new_sh_chan->common.device_node);
874 kfree(new_sh_chan);
875 return err;
876 }
877
878 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
879 {
880 int i;
881
882 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
883 if (shdev->chan[i]) {
884 struct sh_dmae_chan *shchan = shdev->chan[i];
885 if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
886 free_irq(dmte_irq_map[i], shchan);
887
888 list_del(&shchan->common.device_node);
889 kfree(shchan);
890 shdev->chan[i] = NULL;
891 }
892 }
893 shdev->common.chancnt = 0;
894 }
895
896 static int __init sh_dmae_probe(struct platform_device *pdev)
897 {
898 int err = 0, cnt, ecnt;
899 unsigned long irqflags = IRQF_DISABLED;
900 #if defined(CONFIG_CPU_SH4)
901 int eirq[] = { DMAE0_IRQ,
902 #if defined(DMAE1_IRQ)
903 DMAE1_IRQ
904 #endif
905 };
906 #endif
907 struct sh_dmae_device *shdev;
908
909 /* get platform data */
910 if (!pdev->dev.platform_data)
911 return -ENODEV;
912
913 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
914 if (!shdev) {
915 dev_err(&pdev->dev, "Not enough memory\n");
916 return -ENOMEM;
917 }
918
919 /* platform data */
920 memcpy(&shdev->pdata, pdev->dev.platform_data,
921 sizeof(struct sh_dmae_pdata));
922
923 /* reset dma controller */
924 err = sh_dmae_rst(0);
925 if (err)
926 goto rst_err;
927
928 /* SH7780/85/23 have a second DMAOR (DMAOR1) */
929 if (shdev->pdata.mode & SHDMA_DMAOR1) {
930 err = sh_dmae_rst(1);
931 if (err)
932 goto rst_err;
933 }
934
935 INIT_LIST_HEAD(&shdev->common.channels);
936
937 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
938 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
939
940 shdev->common.device_alloc_chan_resources
941 = sh_dmae_alloc_chan_resources;
942 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
943 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
944 shdev->common.device_is_tx_complete = sh_dmae_is_complete;
945 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
946
947 /* Compulsory fields for DMA_SLAVE */
948 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
949 shdev->common.device_terminate_all = sh_dmae_terminate_all;
950
951 shdev->common.dev = &pdev->dev;
952 /* Default transfer size of 32 bytes requires 32-byte alignment */
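	/* copy_align is a log2 shift: 1 << 5 == 32-byte alignment */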
953 shdev->common.copy_align = 5;
954
955 #if defined(CONFIG_CPU_SH4)
956 /* In mixed IRQ mode (SH7722/SH7730 etc.) error IRQs are shared with the transfer IRQs */
957 if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
958 irqflags = IRQF_SHARED;
959 eirq[0] = DMTE0_IRQ;
960 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
961 eirq[1] = DMTE6_IRQ;
962 #endif
963 }
964
965 for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
966 err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
967 "DMAC Address Error", shdev);
968 if (err) {
969 dev_err(&pdev->dev, "DMA device request_irq "
970 "error (irq %d) with return %d\n",
971 eirq[ecnt], err);
972 goto eirq_err;
973 }
974 }
975 #endif /* CONFIG_CPU_SH4 */
976
977 /* Create DMA Channel */
978 for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
979 err = sh_dmae_chan_probe(shdev, cnt);
980 if (err)
981 goto chan_probe_err;
982 }
983
984 platform_set_drvdata(pdev, shdev);
985 dma_async_device_register(&shdev->common);
986
987 return err;
988
989 chan_probe_err:
990 sh_dmae_chan_remove(shdev);
991
992 eirq_err:
993 for (ecnt-- ; ecnt >= 0; ecnt--)
994 free_irq(eirq[ecnt], shdev);
995
996 rst_err:
997 kfree(shdev);
998
999 return err;
1000 }
1001
1002 static int __exit sh_dmae_remove(struct platform_device *pdev)
1003 {
1004 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1005
1006 dma_async_device_unregister(&shdev->common);
1007
1008 if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
1009 free_irq(DMTE0_IRQ, shdev);
1010 #if defined(DMTE6_IRQ)
1011 free_irq(DMTE6_IRQ, shdev);
1012 #endif
1013 }
1014
1015 /* channel data remove */
1016 sh_dmae_chan_remove(shdev);
1017
1018 if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
1019 free_irq(DMAE0_IRQ, shdev);
1020 #if defined(DMAE1_IRQ)
1021 free_irq(DMAE1_IRQ, shdev);
1022 #endif
1023 }
1024 kfree(shdev);
1025
1026 return 0;
1027 }
1028
1029 static void sh_dmae_shutdown(struct platform_device *pdev)
1030 {
1031 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1032 sh_dmae_ctl_stop(0);
1033 if (shdev->pdata.mode & SHDMA_DMAOR1)
1034 sh_dmae_ctl_stop(1);
1035 }
1036
1037 static struct platform_driver sh_dmae_driver = {
1038 .remove = __exit_p(sh_dmae_remove),
1039 .shutdown = sh_dmae_shutdown,
1040 .driver = {
1041 .name = "sh-dma-engine",
1042 },
1043 };
1044
1045 static int __init sh_dmae_init(void)
1046 {
1047 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1048 }
1049 module_init(sh_dmae_init);
1050
1051 static void __exit sh_dmae_exit(void)
1052 {
1053 platform_driver_unregister(&sh_dmae_driver);
1054 }
1055 module_exit(sh_dmae_exit);
1056
1057 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1058 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1059 MODULE_LICENSE("GPL");