/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512

/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is set unless the
	 * channel is in use on this platform.  It is maintained by
	 * SoC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}

296 * map_dmach_param - Maps channel number to param entry number
297 *
298 * This maps the dma channel number to param entry numberter. In
299 * other words using the DMA channel mapping registers a param entry
300 * can be mapped to any channel
301 *
302 * Callers are responsible for ensuring the channel mapping logic is
303 * included in that particular EDMA variant (Eg : dm646x)
304 *
305 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ctlr, SH_ICR, bank,
					BIT(slot));
			if (edma_cc[ctlr]->intr_data[channel].callback)
				edma_cc[ctlr]->intr_data[channel].callback(
					channel, DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[channel].data);
		}
	} while (sh_ipr);

	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback) {
						edma_cc[ctlr]->intr_data[k].
						callback(k,
						DMA_CC_ERROR,
						edma_cc[ctlr]->intr_data
						[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
								BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we find a set
	 * of contiguous parameter RAM slots but not the exact number
	 * requested, as we may reach the total number of parameter RAM slots
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	j = start_slot;
	for_each_set_bit_from(j, tmp_inuse, stop_slot)
		clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}

/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
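
/*
 * Example (illustrative sketch): a client driver owning a hardware-mapped
 * channel might allocate it as below.  DAVINCI_DMA_MMCRXEVT,
 * my_dma_callback() and my_dev are assumed, hypothetical names; the
 * callback signature matches the one taken by edma_alloc_channel().
 *
 *	static void my_dma_callback(unsigned channel, u16 ch_status,
 *				    void *data)
 *	{
 *		if (ch_status == DMA_COMPLETE)
 *			complete(data);		// e.g. a struct completion
 *	}
 *
 *	int chan = edma_alloc_channel(DAVINCI_DMA_MMCRXEVT,
 *				      my_dma_callback, my_dev,
 *				      EVENTQ_DEFAULT);
 *	if (chan < 0)
 *		return chan;	// invalid, busy, or none available
 */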


/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (!edma_cc[ctlr])
		return -EINVAL;

	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
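
/*
 * Example (illustrative sketch): allocating a link slot on the same
 * controller as an already-allocated channel, then freeing both.  Here
 * "chan" is assumed to hold a value returned by edma_alloc_channel().
 *
 *	int slot = edma_alloc_slot(EDMA_CTLR(chan), EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 *	...
 *	edma_free_slot(slot);
 *	edma_free_channel(chan);
 */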

/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);


/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * @id: EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT or
 *	EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the starting parameter RAM slot; must be passed if @id is
 *	EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * This returns the starting slot of the set of contiguous parameter RAM
 * slots that was requested.
 *
 * If @id is EDMA_CONT_PARAMS_ANY, the API starts looking for a set of
 * contiguous parameter RAM slots from slot 64 in the case of DaVinci
 * SOCs and slot 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT, the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed
 * as an argument.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT, the API initially starts
 * looking for a set of contiguous parameter RAM slots from the @slot
 * that is passed as an argument.  On failure, it will try to find a set
 * of contiguous parameter RAM slots among the remaining parameter RAM
 * slots.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than
	 * the number of channels and less than the total number
	 * of slots
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
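
/*
 * Example (illustrative sketch): reserving a block of eight contiguous
 * slots anywhere in parameter RAM on controller 0; @slot is ignored for
 * EDMA_CONT_PARAMS_ANY, so 0 is passed as a placeholder.
 *
 *	int base = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 8);
 *	if (base < 0)
 *		return base;	// -EINVAL or -EBUSY
 *	...
 *	edma_free_cont_slots(base, 8);
 */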

/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots.
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
 * API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
		enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);

/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
		enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);
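
/*
 * Example (illustrative sketch): a memory-to-FIFO setup.  buf_dma (a
 * dma_addr_t from the DMA mapping API) and fifo_phys are assumed names.
 * The source walks through memory, so its width argument is ignored;
 * the destination is a fixed 32-bit-wide FIFO.
 *
 *	edma_set_src(slot, buf_dma, INCR, W8BIT);
 *	edma_set_dest(slot, fifo_phys, FIFO, W32BIT);
 */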

/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);

/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);

/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
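
/*
 * Example (illustrative sketch): for a contiguous memory source feeding
 * a fixed peripheral register, the source advances by one array (acnt
 * bytes, assumed from the transfer setup) per event, while the
 * destination never moves.
 *
 *	edma_set_src_index(slot, acnt, 0);
 *	edma_set_dest_index(slot, 0, 0);	// fixed hardware register
 */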

/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
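
/*
 * Example (illustrative sketch), following the AB-synchronized FIFO case
 * described above: a 32-byte-wide, 8-entry FIFO moving a 4 KiB buffer
 * transfers one 256-byte frame (32 * 8) per event, over 16 events
 * (32 * 8 * 16 = 4096 bytes).  bcnt_rld is only consulted in ASYNC mode.
 *
 *	edma_set_transfer_params(slot, 32, 8, 16, 8, ABSYNC);
 */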

/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
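
/*
 * Example (illustrative sketch): a ping-pong arrangement in which a
 * channel's slot reloads alternately from two link slots, so buffers
 * flip without CPU intervention.  "chan", "ping" and "pong" are assumed
 * to be already-allocated, fully configured slots.
 *
 *	edma_link(chan, ping);	// channel reloads from ping first
 *	edma_link(ping, pong);	// ping reloads from pong
 *	edma_link(pong, ping);	// pong reloads from ping, closing the loop
 */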

/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
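
/*
 * Example (illustrative sketch): building a whole parameter set in memory
 * and writing it in one shot, instead of the piecewise calls above.  The
 * slot is read back first so existing OPT bits are preserved; buf_dma and
 * fifo_phys are assumed names, and a_b_cnt is the packed bcnt/acnt field.
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);	// start from current contents
 *	p.src = buf_dma;
 *	p.dst = fifo_phys;
 *	p.a_b_cnt = (8 << 16) | 32;	// bcnt = 8, acnt = 32
 *	p.ccnt = 16;
 *	edma_write_slot(slot, &p);
 */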

/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);

/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);

/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
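
/*
 * Example (illustrative lifecycle sketch) tying the calls above together
 * for a single event-driven transfer; the slot/channel setup shown
 * earlier, and all error handling, are assumed.
 *
 *	edma_start(chan);	// arm; hardware events now trigger transfers
 *	...			// wait for the completion callback
 *	edma_stop(chan);	// quiesce and clear pending events
 *	edma_free_channel(chan);
 */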

/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);

/******************************************************************************
 *
 * This cleans the ParamEntry and brings EDMA back to its initial state
 * if media has been removed before EDMA has finished.  It is useful for
 * removable media.
 * Arguments:
 *	channel - channel number
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);

/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 * Arguments:
 *	channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	**info = pdev->dev.platform_data;
	const s8		(*queue_priority_mapping)[2];
	const s8		(*queue_tc_mapping)[2];
	int			i, j, off, ln, found = 0;
	int			status = -1;
	const s16		(*rsv_chans)[2];
	const s16		(*rsv_slots)[2];
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	resource_size_t		len[EDMA_MAX_CC];
	char			res_name[10];
	char			irq_name[10];

	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j] || !info[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_cc[j]) {
			status = -ENOMEM;
			goto fail1;
		}

		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
							EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_tc_mapping = info[j]->queue_tc_mapping;
		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j]->n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
		kfree(edma_cc[i]);
	}
	return status;
}


static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);