Merge tag 'v3.10.55' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / mach-mt8127 / mt_dma.c
#include <asm/io.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/miscdevice.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>

#include "mach/mt_reg_base.h"
#include "mach/irqs.h"
#include "mach/dma.h"
#include "mach/sync_write.h"
#include "mach/mt_clkmgr.h"
#include "mach/emi_mpu.h"
16
17 #define DMA_DEBUG 0
18 #if(DMA_DEBUG == 1)
19 #define dbgmsg printk
20 #else
21 #define dbgmsg(...)
22 #endif
23
24 /*
25 * DMA information
26 */
27
28 #define NR_GDMA_CHANNEL (2)
29 #define NR_PDMA_CHANNEL (5)
30 #define NR_VFFDMA_CHANNEL (6)
31 #define GDMA_START (1)
32 #define NR_DMA (NR_GDMA_CHANNEL + NR_PDMA_CHANNEL + NR_VFFDMA_CHANNEL)
33
34 /*
35 * Register Definition
36 */
37
38 #define DMA_BASE_CH(n) IOMEM((AP_DMA_BASE + 0x0080 * (n + 1)))
39 #define DMA_GLOBAL_INT_FLAG IOMEM((AP_DMA_BASE + 0x0000))
40 #define DMA_GLOBAL_RUNNING_STATUS IOMEM((AP_DMA_BASE + 0x0008))
41 #define DMA_GLOBAL_GSEC_EN IOMEM((AP_DMA_BASE + 0x0014))
42 #define DMA_GDMA_SEC_EN(n) IOMEM((AP_DMA_BASE + 0x0020 + 4 * (n)))
43
44 /*
45 * General DMA channel register mapping
46 */
47 #define DMA_INT_FLAG(base) IOMEM((base + 0x0000))
48 #define DMA_INT_EN(base) IOMEM((base + 0x0004))
49 #define DMA_START(base) IOMEM((base + 0x0008))
50 #define DMA_RESET(base) IOMEM((base + 0x000C))
51 #define DMA_STOP(base) IOMEM((base + 0x0010))
52 #define DMA_FLUSH(base) IOMEM((base + 0x0014))
53 #define DMA_CON(base) IOMEM((base + 0x0018))
54 #define DMA_SRC(base) IOMEM((base + 0x001C))
55 #define DMA_DST(base) IOMEM((base + 0x0020))
56 #define DMA_LEN1(base) IOMEM((base + 0x0024))
57 #define DMA_LEN2(base) IOMEM((base + 0x0028))
58 #define DMA_JUMP_ADDR(base) IOMEM((base + 0x002C))
59 #define DMA_IBUFF_SIZE(base) IOMEM((base + 0x0030))
60 #define DMA_CONNECT(base) IOMEM((base + 0x0034))
61 #define DMA_AXIATTR(base) IOMEM((base + 0x0038))
62 #define DMA_DBG_STAT(base) IOMEM((base + 0x0050))
63
64 /*
65 * Register Setting
66 */
67
68 #define DMA_GLBSTA_RUN(ch) (0x00000001 << ((ch)))
69 #define DMA_GLBSTA_IT(ch) (0x00000001 << ((ch)))
70 #define DMA_GDMA_LEN_MAX_MASK (0x000FFFFF)
71
72 #define DMA_CON_DIR (0x00000001)
73 #define DMA_CON_FPEN (0x00000002) /* Use fix pattern. */
74 #define DMA_CON_SLOW_EN (0x00000004)
75 #define DMA_CON_DFIX (0x00000008)
76 #define DMA_CON_SFIX (0x00000010)
77 #define DMA_CON_WPEN (0x00008000)
78 #define DMA_CON_WPSD (0x00100000)
79 #define DMA_CON_WSIZE_1BYTE (0x00000000)
80 #define DMA_CON_WSIZE_2BYTE (0x01000000)
81 #define DMA_CON_WSIZE_4BYTE (0x02000000)
82 #define DMA_CON_RSIZE_1BYTE (0x00000000)
83 #define DMA_CON_RSIZE_2BYTE (0x10000000)
84 #define DMA_CON_RSIZE_4BYTE (0x20000000)
85 #define DMA_CON_BURST_MASK (0x00070000)
86 #define DMA_CON_SLOW_OFFSET (5)
87 #define DMA_CON_SLOW_MAX_MASK (0x000003FF)
88
89 #define DMA_START_BIT (0x00000001)
90 #define DMA_STOP_BIT (0x00000000)
91 #define DMA_INT_FLAG_BIT (0x00000001)
92 #define DMA_INT_FLAG_CLR_BIT (0x00000000)
93 #define DMA_INT_EN_BIT (0x00000001)
94 #define DMA_FLUSH_BIT (0x00000001)
95 #define DMA_FLUSH_CLR_BIT (0x00000000)
96 #define DMA_UART_RX_INT_EN_BIT (0x00000003)
97 #define DMA_INT_EN_CLR_BIT (0x00000000)
98 #define DMA_WARM_RST_BIT (0x00000001)
99 #define DMA_HARD_RST_BIT (0x00000002)
100 #define DMA_HARD_RST_CLR_BIT (0x00000000)
101 #define DMA_READ_COHER_BIT (0x00000010)
102 #define DMA_WRITE_COHER_BIT (0x00100000)
103 #define DMA_GSEC_EN_BIT (0x00000001)
104 #define DMA_SEC_EN_BIT (0x00000001)
105
106
107
108 /*
109 * Register Limitation
110 */
111
112 #define MAX_TRANSFER_LEN1 (0xFFFFF)
113 #define MAX_TRANSFER_LEN2 (0xFFFFF)
114 #define MAX_SLOW_DOWN_CNTER (0x3FF)
115
116 /*
117 * channel information structures
118 */
119
/* Per-channel bookkeeping for the general-purpose (GDMA) channels. */
struct dma_ctrl
{
	int in_use;		/* non-zero while the channel is allocated via mt_req_gdma() */
	void (*isr_cb)(void *);	/* completion callback invoked from the channel ISR, may be NULL */
	void *data;		/* opaque argument handed to isr_cb */
};
126
127 /*
128 * global variables
129 */
130
131 static struct dma_ctrl dma_ctrl[NR_GDMA_CHANNEL];
132 static DEFINE_SPINLOCK(dma_drv_lock);
133
134 #define PDN_APDMA_MODULE_NAME ("APDMA")
135 #define GDMA_WARM_RST_TIMEOUT (100) // ms
136
137 /*
138 * mt_req_gdma: request a general DMA.
139 * @chan: specify a channel or not
140 * Return channel number for success; return negative errot code for failure.
141 */
142 int mt_req_gdma(DMA_CHAN chan)
143 {
144 unsigned long flags;
145 int i;
146
147 spin_lock_irqsave(&dma_drv_lock, flags);
148
149 if (chan == GDMA_ANY) {
150 for (i = GDMA_START; i < NR_GDMA_CHANNEL; i++) {
151 if (dma_ctrl[i].in_use) {
152 continue;
153 } else {
154 dma_ctrl[i].in_use = 1;
155 break;
156 }
157 }
158 } else {
159 if (dma_ctrl[chan].in_use) {
160 i = NR_GDMA_CHANNEL;
161 }
162 else {
163 i = chan;
164 dma_ctrl[chan].in_use = 1;
165 }
166 }
167
168 spin_unlock_irqrestore(&dma_drv_lock, flags);
169
170 if (i < NR_GDMA_CHANNEL) {
171 enable_clock(MT_CG_PERI_AP_DMA, PDN_APDMA_MODULE_NAME);
172
173 mt_reset_gdma_conf(i);
174
175 return i;
176 } else {
177 return -DMA_ERR_NO_FREE_CH;
178 }
179 }
180
181 EXPORT_SYMBOL(mt_req_gdma);
182
183 /*
184 * mt_start_gdma: start the DMA stransfer for the specified GDMA channel
185 * @channel: GDMA channel to start
186 * Return 0 for success; return negative errot code for failure.
187 */
188 int mt_start_gdma(int channel)
189 {
190 if ((channel < GDMA_START) || (channel >= (GDMA_START + NR_GDMA_CHANNEL))) {
191 return -DMA_ERR_INVALID_CH;
192 }else if (dma_ctrl[channel].in_use == 0) {
193 return -DMA_ERR_CH_FREE;
194 }
195
196 writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(DMA_BASE_CH(channel)));
197 mt_reg_sync_writel(DMA_START_BIT, DMA_START(DMA_BASE_CH(channel)));
198
199 return 0;
200 }
201
202 EXPORT_SYMBOL(mt_start_gdma);
203
204 /*
205 * mt_polling_gdma: wait the DMA to finish for the specified GDMA channel
206 * @channel: GDMA channel to polling
207 * @timeout: polling timeout in ms
208 * Return 0 for success;
209 * Return 1 for timeout
210 * return negative errot code for failure.
211 */
212 int mt_polling_gdma(int channel, unsigned long timeout)
213 {
214 if (channel < GDMA_START) {
215 return -DMA_ERR_INVALID_CH;
216 }
217
218 if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
219 return -DMA_ERR_INVALID_CH;
220 }
221
222 if (dma_ctrl[channel].in_use == 0) {
223 return -DMA_ERR_CH_FREE;
224 }
225
226 timeout = jiffies + ((HZ * timeout) / 1000);
227
228 do {
229 if (time_after(jiffies, timeout)) {
230 printk(KERN_ERR "GDMA_%d polling timeout !!\n", channel);
231 mt_dump_gdma(channel);
232 return 1;
233 }
234 } while (readl(DMA_START(DMA_BASE_CH(channel))));
235
236 return 0;
237 }
238
239 EXPORT_SYMBOL(mt_polling_gdma);
240
241 /*
242 * mt_stop_gdma: stop the DMA stransfer for the specified GDMA channel
243 * @channel: GDMA channel to stop
244 * Return 0 for success; return negative errot code for failure.
245 */
int mt_stop_gdma(int channel)
{
	/* Channel must be in the valid GDMA range... */
	if (channel < GDMA_START) {
		return -DMA_ERR_INVALID_CH;
	}

	if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
		return -DMA_ERR_INVALID_CH;
	}

	/* ...and must currently be allocated. */
	if (dma_ctrl[channel].in_use == 0) {
		return -DMA_ERR_CH_FREE;
	}

	/* Request a flush: the engine drains in-flight data and then clears
	 * its START status.  The write order here matters. */
	writel(DMA_FLUSH_BIT, DMA_FLUSH(DMA_BASE_CH(channel)));
	/* NOTE(review): unbounded busy-wait — if the engine hangs, this spins
	 * forever with no timeout or cpu_relax(); confirm acceptable. */
	while (readl(DMA_START(DMA_BASE_CH(channel))));
	/* De-assert the flush request, then acknowledge any pending
	 * completion interrupt (sync write orders it before return). */
	writel(DMA_FLUSH_CLR_BIT, DMA_FLUSH(DMA_BASE_CH(channel)));
	mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(DMA_BASE_CH(channel)));

	return 0;
}
267
268 EXPORT_SYMBOL(mt_stop_gdma);
269
270 /*
271 * mt_config_gdma: configure the given GDMA channel.
272 * @channel: GDMA channel to configure
273 * @config: pointer to the mt_gdma_conf structure in which the GDMA configurations store
274 * @flag: ALL, SRC, DST, or SRC_AND_DST.
275 * Return 0 for success; return negative errot code for failure.
276 */
int mt_config_gdma(int channel, struct mt_gdma_conf *config, DMA_CONF_FLAG flag)
{
	unsigned int dma_con = 0x0, limiter = 0;

	/* Channel must be valid and currently allocated. */
	if ((channel < GDMA_START) || (channel >= (GDMA_START + NR_GDMA_CHANNEL))) {
		return -DMA_ERR_INVALID_CH;
	}

	if (dma_ctrl[channel].in_use == 0) {
		return -DMA_ERR_CH_FREE;
	}

	if (!config) {
		return -DMA_ERR_INV_CONFIG;
	}

	/* Fixed-address (non-incrementing) source transfers are rejected
	 * outright; the earlier alignment check is kept commented out. */
	// if (!(config->sinc) && ((config->src) % 8)) {
	// printk("GDMA fixed address mode requires 8-bytes aligned address\n");
	if (config->sfix)
	{
		printk("GMDA fixed address mode doesn't support\n");
		return -DMA_ERR_INV_CONFIG;
	}

	/* Likewise for fixed-address destination transfers. */
	// if (!(config->dinc) && ((config->dst) % 8)) {
	// printk("GDMA fixed address mode requires 8-bytes aligned address\n");
	if (config->dfix)
	{
		printk("GMDA fixed address mode doesn't support\n");
		return -DMA_ERR_INV_CONFIG;
	}

	/* The hardware length register is 20 bits wide (MAX_TRANSFER_LEN1). */
	if (config->count > MAX_TRANSFER_LEN1)
	{
		printk("GDMA transfer length cannot exceeed 0x%x.\n", MAX_TRANSFER_LEN1);
		return -DMA_ERR_INV_CONFIG;
	}

	/* The bandwidth-limiter counter is 10 bits wide (MAX_SLOW_DOWN_CNTER). */
	if (config->limiter > MAX_SLOW_DOWN_CNTER)
	{
		printk("GDMA slow down counter cannot exceeed 0x%x.\n", MAX_SLOW_DOWN_CNTER);
		return -DMA_ERR_INV_CONFIG;
	}

	switch (flag) {
	case ALL:
		/* Full (re)configuration: program addresses and lengths first;
		 * the control word DMA_CON is written last. */
		writel(config->src, DMA_SRC(DMA_BASE_CH(channel)));
		writel(config->dst, DMA_DST(DMA_BASE_CH(channel)));
		writel((config->wplen) & DMA_GDMA_LEN_MAX_MASK, DMA_LEN2(DMA_BASE_CH(channel)));
		writel(config->wpto, DMA_JUMP_ADDR(DMA_BASE_CH(channel)));
		writel((config->count) & DMA_GDMA_LEN_MAX_MASK, DMA_LEN1(DMA_BASE_CH(channel)));

		/*setup coherence bus*/
		/*
		if (config->cohen){
		writel((DMA_READ_COHER_BIT|readl(DMA_AXIATTR(DMA_BASE_CH(channel)))), DMA_AXIATTR(DMA_BASE_CH(channel)));
		writel((DMA_WRITE_COHER_BIT|readl(DMA_AXIATTR(DMA_BASE_CH(channel)))), DMA_AXIATTR(DMA_BASE_CH(channel)));
		}
		*/

		/*setup security channel */
		if (config->sec){
			printk("1:GMDA GSEC:%x, ChSEC:%x\n",readl(DMA_GLOBAL_GSEC_EN),readl(DMA_GDMA_SEC_EN(channel)));
			writel((DMA_GSEC_EN_BIT|readl(DMA_GLOBAL_GSEC_EN)), DMA_GLOBAL_GSEC_EN);
			writel((DMA_SEC_EN_BIT|readl(DMA_GDMA_SEC_EN(channel))), DMA_GDMA_SEC_EN(channel));
			printk("2:GMDA GSEC:%x, ChSEC:%x\n",readl(DMA_GLOBAL_GSEC_EN),readl(DMA_GDMA_SEC_EN(channel)));
		}
		else
		{
			/* NOTE(review): the non-secure path only logs; clearing
			 * the global secure-enable bit is deliberately left
			 * commented out below — confirm intended. */
			printk("1:GMDA GSEC:%x, ChSEC:%x\n",readl(DMA_GLOBAL_GSEC_EN),readl(DMA_GDMA_SEC_EN(channel)));
			//writel(((~DMA_GSEC_EN_BIT)&readl(DMA_GLOBAL_GSEC_EN)), DMA_GLOBAL_GSEC_EN);
			printk("2:GMDA GSEC:%x, ChSEC:%x\n",readl(DMA_GLOBAL_GSEC_EN),readl(DMA_GDMA_SEC_EN(channel)));
		}

		if (config->wpen) {
			dma_con |= DMA_CON_WPEN;
		}

		if (config->wpsd) {
			dma_con |= DMA_CON_WPSD;
		}

		/* Interrupt mode: remember the completion callback so the ISR
		 * can dispatch it; otherwise make sure no stale callback runs. */
		if (config->iten) {
			dma_ctrl[channel].isr_cb = config->isr_cb;
			dma_ctrl[channel].data = config->data;
			writel(DMA_INT_EN_BIT, DMA_INT_EN(DMA_BASE_CH(channel)));
		}else {
			dma_ctrl[channel].isr_cb = NULL;
			dma_ctrl[channel].data = NULL;
			writel(DMA_INT_EN_CLR_BIT, DMA_INT_EN(DMA_BASE_CH(channel)));
		}

		if (!(config->dfix) && !(config->sfix)) {
			dma_con |= (config->burst & DMA_CON_BURST_MASK);
		}else {
			/* Unreachable today (sfix/dfix were rejected above);
			 * kept for when fixed-address mode is supported. */
			if (config->dfix) {
				dma_con |= DMA_CON_DFIX;
				dma_con |= DMA_CON_WSIZE_1BYTE;
			}

			if (config->sfix) {
				dma_con |= DMA_CON_SFIX;
				dma_con |= DMA_CON_RSIZE_1BYTE;
			}

			// fixed src/dst mode only supports burst type SINGLE
			dma_con |= DMA_CON_BURST_SINGLE;
		}

		/* Optional bandwidth limiter: counter plus enable bit. */
		if (config->limiter) {
			limiter = (config->limiter) & DMA_CON_SLOW_MAX_MASK;
			dma_con |= limiter << DMA_CON_SLOW_OFFSET;
			dma_con |= DMA_CON_SLOW_EN;
		}

		writel(dma_con, DMA_CON(DMA_BASE_CH(channel)));
		break;

	case SRC:
		writel(config->src, DMA_SRC(DMA_BASE_CH(channel)));

		break;

	case DST:
		writel(config->dst, DMA_DST(DMA_BASE_CH(channel)));
		break;

	case SRC_AND_DST:
		writel(config->src, DMA_SRC(DMA_BASE_CH(channel)));
		writel(config->dst, DMA_DST(DMA_BASE_CH(channel)));
		break;

	default:
		break;
	}

	/* use the data synchronization barrier to ensure that all writes are completed */
	dsb();

	return 0;
}
419
420 EXPORT_SYMBOL(mt_config_gdma);
421
422 /*
423 * mt_free_gdma: free a general DMA.
424 * @channel: channel to free
425 * Return 0 for success; return negative errot code for failure.
426 */
427 int mt_free_gdma(int channel)
428 {
429 if (channel < GDMA_START) {
430 return -DMA_ERR_INVALID_CH;
431 }
432
433 if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
434 return -DMA_ERR_INVALID_CH;
435 }
436
437 if (dma_ctrl[channel].in_use == 0) {
438 return -DMA_ERR_CH_FREE;
439 }
440
441 mt_stop_gdma(channel);
442
443 dma_ctrl[channel].isr_cb = NULL;
444 dma_ctrl[channel].data = NULL;
445 dma_ctrl[channel].in_use = 0;
446
447 disable_clock(MT_CG_PERI_AP_DMA, PDN_APDMA_MODULE_NAME);
448
449 return 0;
450 }
451
452 EXPORT_SYMBOL(mt_free_gdma);
453
454 /*
455 * mt_dump_gdma: dump registers for the specified GDMA channel
456 * @channel: GDMA channel to dump registers
457 * Return 0 for success; return negative errot code for failure.
458 */
459 int mt_dump_gdma(int channel)
460 {
461 unsigned int i;
462 printk("Channel 0x%x\n",channel);
463 for (i = 0; i < 15; i++)
464 {
465 printk("addr:0x%p, value:%x\n", DMA_BASE_CH(channel) + i * 4, readl(DMA_BASE_CH(channel) + i * 4));
466 }
467 // GDMA DEBUG Status reg is @ channel + 0xD0
468 printk("addr:0x%p, value:%x\n", DMA_BASE_CH(channel) + 0xD0, readl(DMA_BASE_CH(channel) + 0xD0));
469
470 return 0;
471 }
472
473 EXPORT_SYMBOL(mt_dump_gdma);
474
475 /*
476 * mt_warm_reset_gdma: warm reset the specified GDMA channel
477 * @channel: GDMA channel to warm reset
478 * Return 0 for success; return negative errot code for failure.
479 */
480 int mt_warm_reset_gdma(int channel)
481 {
482 if (channel < GDMA_START) {
483 return -DMA_ERR_INVALID_CH;
484 }
485
486 if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
487 return -DMA_ERR_INVALID_CH;
488 }
489
490 if (dma_ctrl[channel].in_use == 0) {
491 return -DMA_ERR_CH_FREE;
492 }
493
494 dbgmsg("GDMA_%d Warm Reset !!\n", channel);
495
496 mt_reg_sync_writel(DMA_WARM_RST_BIT, DMA_RESET(DMA_BASE_CH(channel)));
497
498 if (mt_polling_gdma(channel, GDMA_WARM_RST_TIMEOUT) != 0)
499 return 1;
500 else
501 return 0;
502 }
503
504 EXPORT_SYMBOL(mt_warm_reset_gdma);
505
506 /*
507 * mt_hard_reset_gdma: hard reset the specified GDMA channel
508 * @channel: GDMA channel to hard reset
509 * Return 0 for success; return negative errot code for failure.
510 */
511 int mt_hard_reset_gdma(int channel)
512 {
513 if (channel < GDMA_START) {
514 return -DMA_ERR_INVALID_CH;
515 }
516
517 if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
518 return -DMA_ERR_INVALID_CH;
519 }
520
521 if (dma_ctrl[channel].in_use == 0) {
522 return -DMA_ERR_CH_FREE;
523 }
524
525 printk(KERN_ERR "GDMA_%d Hard Reset !!\n", channel);
526
527 mt_reg_sync_writel(DMA_HARD_RST_BIT, DMA_RESET(DMA_BASE_CH(channel)));
528 mt_reg_sync_writel(DMA_HARD_RST_CLR_BIT, DMA_RESET(DMA_BASE_CH(channel)));
529
530 return 0;
531 }
532
533 EXPORT_SYMBOL(mt_hard_reset_gdma);
534
535 /*
536 * mt_reset_gdma: reset the specified GDMA channel
537 * @channel: GDMA channel to reset
538 * Return 0 for success; return negative errot code for failure.
539 */
540 int mt_reset_gdma(int channel)
541 {
542 if (channel < GDMA_START) {
543 return -DMA_ERR_INVALID_CH;
544 }
545
546 if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
547 return -DMA_ERR_INVALID_CH;
548 }
549
550 if (dma_ctrl[channel].in_use == 0) {
551 return -DMA_ERR_CH_FREE;
552 }
553
554 dbgmsg("GDMA_%d Reset !!\n", channel);
555
556 if (mt_warm_reset_gdma(channel) != 0)
557 mt_hard_reset_gdma(channel);
558
559 return 0;
560 }
561
562 EXPORT_SYMBOL(mt_reset_gdma);
563
564 /*
565 * gdma1_irq_handler: general DMA channel 1 interrupt service routine.
566 * @irq: DMA IRQ number
567 * @dev_id:
568 * Return IRQ returned code.
569 */
570 static irqreturn_t gdma1_irq_handler(int irq, void *dev_id)
571 {
572 volatile unsigned glbsta = readl(DMA_GLOBAL_INT_FLAG);
573
574 dbgmsg(KERN_DEBUG"DMA Module - %s ISR Start\n", __func__);
575 dbgmsg(KERN_DEBUG"DMA Module - GLBSTA = 0x%x\n", glbsta);
576
577 if (glbsta & DMA_GLBSTA_IT(G_DMA_1)){
578 if (dma_ctrl[G_DMA_1].isr_cb) {
579 dma_ctrl[G_DMA_1].isr_cb(dma_ctrl[G_DMA_1].data);
580 }
581
582 mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(DMA_BASE_CH(G_DMA_1)));
583 #if(DMA_DEBUG == 1)
584 glbsta = readl(DMA_GLOBAL_INT_FLAG);
585 printk(KERN_DEBUG"DMA Module - GLBSTA after ack = 0x%x\n", glbsta);
586 #endif
587 }
588
589 dbgmsg(KERN_DEBUG"DMA Module - %s ISR END\n", __func__);
590
591 return IRQ_HANDLED;
592 }
593
594 /*
595 * gdma2_irq_handler: general DMA channel 2 interrupt service routine.
596 * @irq: DMA IRQ number
597 * @dev_id:
598 * Return IRQ returned code.
599 */
600 static irqreturn_t gdma2_irq_handler(int irq, void *dev_id)
601 {
602 volatile unsigned glbsta = readl(DMA_GLOBAL_INT_FLAG);
603
604 dbgmsg(KERN_DEBUG"DMA Module - %s ISR Start\n", __func__);
605 dbgmsg(KERN_DEBUG"DMA Module - GLBSTA = 0x%x\n", glbsta);
606
607 if (glbsta & DMA_GLBSTA_IT(G_DMA_2)){
608 if (dma_ctrl[G_DMA_2].isr_cb) {
609 dma_ctrl[G_DMA_2].isr_cb(dma_ctrl[G_DMA_2].data);
610 }
611
612 mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(DMA_BASE_CH(G_DMA_2)));
613
614 #if(DMA_DEBUG == 1)
615 glbsta = readl(DMA_GLOBAL_INT_FLAG);
616 printk(KERN_DEBUG"DMA Module - GLBSTA after ack = 0x%x\n", glbsta);
617 #endif
618 }
619
620 dbgmsg(KERN_DEBUG"DMA Module - %s ISR END\n", __func__);
621
622 return IRQ_HANDLED;
623 }
624
625 /*
626 * mt_reset_gdma_conf: reset the config of the specified DMA channel
627 * @iChannel: channel number of the DMA channel to reset
628 */
629 void mt_reset_gdma_conf(const unsigned int iChannel)
630 {
631 struct mt_gdma_conf conf;
632
633 memset(&conf, 0, sizeof(struct mt_gdma_conf));
634
635 if (mt_config_gdma(iChannel, &conf, ALL) != 0){
636 return;
637 }
638
639 return;
640 }
641
642 #define DMA_TEST 0
643 #if(DMA_TEST == 1)
644
645 unsigned int *dma_dst_array_v;
646 unsigned int *dma_src_array_v;
647 dma_addr_t dma_dst_array_p;
648 dma_addr_t dma_src_array_p;
649
650 #define TEST_LEN 4000
651 #define LEN (TEST_LEN / sizeof(int))
652
653 void irq_dma_handler(void * data)
654 {
655 int channel = (int)data;
656 printk("irq_dma_handler called\n");
657 int i = 0;
658 for(i = 0; i < LEN; i++) {
659 if(dma_dst_array_v[i] != dma_src_array_v[i]) {
660 printk("DMA failed, src = %d, dst = %d, i = %d\n", dma_src_array_v[i], dma_dst_array_v[i], i);
661 break;
662 }
663 }
664
665 if(i == LEN)
666 printk("DMA verified ok\n");
667
668 mt_free_gdma(channel);
669 }
670
671 void APDMA_test_transfer(int testcase)
672 {
673 int i, channel;
674
675 channel = mt_req_gdma(GDMA_ANY);
676
677 printk("GDMA channel:%d\n",channel);
678 if(channel < 0 ){
679 printk("ERROR Register DMA\n");
680 return;
681 }
682
683 mt_reset_gdma_conf(channel);
684
685 dma_dst_array_v = dma_alloc_coherent(NULL, TEST_LEN, &dma_dst_array_p, GFP_KERNEL ); // 25 unsinged int
686 dma_src_array_v = dma_alloc_coherent(NULL, TEST_LEN, &dma_src_array_p, GFP_KERNEL );
687 struct mt_gdma_conf dma_conf = {
688 .count = TEST_LEN,
689 .src = dma_src_array_p,
690 .dst = dma_dst_array_p,
691 .iten = (testcase == 2) ? DMA_FALSE : DMA_TRUE,
692 .isr_cb = (testcase == 2) ? NULL : irq_dma_handler,
693 .data = channel,
694 .burst = DMA_CON_BURST_SINGLE,
695 .dfix = DMA_FALSE,
696 .sfix = DMA_FALSE,
697 //.cohen = DMA_TRUE, //enable coherence bus
698 .sec = DMA_FALSE, // non-security channel
699 .limiter = (testcase == 3 || testcase == 4) ? 0x3FF : 0,
700 };
701
702 for(i = 0; i < LEN; i++) {
703 dma_dst_array_v[i] = 0;
704 dma_src_array_v[i] = i;
705 }
706
707 if ( mt_config_gdma(channel, &dma_conf, ALL) != 0) {
708 printk("ERROR set DMA\n");
709 goto _exit;
710 return;
711 }
712
713 /*
714 unsigned int dma_src = readl(DMA_SRC(DMA_BASE_CH(channel)));
715 unsigned int dma_dst = readl(DMA_DST(DMA_BASE_CH(channel)));
716 unsigned int len = readl(DMA_LEN1(DMA_BASE_CH(channel)));
717 printk("start dma channel %d src = 0x%x, dst = 0x%x, len = %d bytes\n", channel, dma_src, dma_dst, len);
718 */
719
720 printk("Start %d\n",mt_start_gdma(channel));
721 switch(testcase)
722 {
723 case 2:
724 if (mt_polling_gdma(channel, GDMA_WARM_RST_TIMEOUT) != 0)
725 printk("Polling transfer failed\n");
726 else
727 printk("Polling succeeded\n");
728 mt_free_gdma(channel);
729 break;
730 case 3:
731 mt_warm_reset_gdma(channel);
732
733 for(i = 0; i < LEN; i++) {
734 if(dma_dst_array_v[i] != dma_src_array_v[i]) {
735 printk("Warm reset succeeded\n");
736 break;
737 }
738 mt_free_gdma(channel);
739 }
740
741 if(i == LEN)
742 printk("Warm reset failed\n");
743 break;
744
745 case 4:
746 mt_hard_reset_gdma(channel);
747
748 for(i = 0; i < LEN; i++) {
749 if(dma_dst_array_v[i] != dma_src_array_v[i]) {
750 printk("Hard reset succeeded\n");
751 break;
752 }
753 mt_free_gdma(channel);
754 }
755 if(i == LEN)
756 printk("Hard reset failed\n");
757 break;
758
759 default:
760 break;
761
762 }
763
764 _exit:
765 if(dma_dst_array_v){
766 dma_free_coherent(NULL, TEST_LEN, dma_dst_array_v, dma_dst_array_p);
767 dma_dst_array_v = dma_dst_array_p = NULL;
768 }
769
770 if(dma_src_array_v){
771 dma_free_coherent(NULL, TEST_LEN, dma_src_array_v, dma_src_array_p);
772 dma_src_array_v = dma_src_array_p = NULL;
773 }
774
775 return;
776 }
777
778 static ssize_t test_show(struct device* dev, struct device_attribute* attr, char* buf)
779 {
780 return snprintf(buf, PAGE_SIZE, "==APDMA test==\n"
781 "1.APDMA transfer (interrupt mode)\n"
782 "2.APDMA transfer (polling mode)\n"
783 "3.APDMA warm reset\n"
784 "4.APDMA hard reset\n"
785 );
786 }
787
788 static ssize_t test_store(struct device* dev, struct device_attribute* attr, const char* buf, size_t count)
789 {
790 char *p = (char *)buf;
791 unsigned int num;
792
793 num = simple_strtoul(p, &p, 10);
794 switch(num){
795 /* Test APDMA Normal Function */
796 case 1:
797 APDMA_test_transfer(1);
798 break;
799 case 2:
800 APDMA_test_transfer(2);
801 break;
802 case 3:
803 APDMA_test_transfer(3);
804 break;
805 case 4:
806 APDMA_test_transfer(4);
807 break;
808 default:
809 break;
810 }
811
812 return count;
813 }
814
/* sysfs attribute backing the test interface.
 * NOTE(review): 0666 makes this world-writable — confirm that is intended,
 * even for a DMA_TEST-only build (checkpatch flags world-writable attrs). */
DEVICE_ATTR(dma_config, 0666, test_show, test_store);

/* Misc character device used only to host the test attribute. */
static struct miscdevice mt_dma_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name = "mt_dma",
	//.fops = &dev_fops,
	.mode = S_IRUGO | S_IWUGO,
};
823 #endif
824
825 /*
826 * mt_init_dma: initialize DMA.
827 * Always return 0.
828 */
829 static int __init mt_init_dma(void)
830 {
831 int i;
832
833 for (i = 0; i < NR_GDMA_CHANNEL; i++) {
834 mt_reset_gdma_conf(i);
835 }
836
837 if (request_irq(MT_GDMA1_IRQ_ID, gdma1_irq_handler, IRQF_TRIGGER_LOW, "GDMA1", NULL)) {
838 printk(KERN_ERR"GDMA1 IRQ LINE NOT AVAILABLE!!\n");
839 }
840
841 if (request_irq(MT_GDMA2_IRQ_ID, gdma2_irq_handler, IRQF_TRIGGER_LOW, "GDMA2", NULL)) {
842 printk(KERN_ERR"GDMA2 IRQ LINE NOT AVAILABLE!!\n");
843 }
844
845 #if(DMA_TEST == 1)
846 int ret = misc_register(&mt_dma_dev);
847
848 if(ret < 0) {
849 printk("DMA MISC Register fail, ret = %d\n", ret);
850 return ret;
851 }
852
853 device_create_file(mt_dma_dev.this_device, &dev_attr_dma_config);
854 #endif
855
856 printk("[APDMA] Init APDMA OK\n");
857
858 return 0;
859 }
860
861 void mt_dma_running_status(void)
862 {
863 unsigned int dma_running_status;
864 int i=0;
865 char *DMA_name [15] = {"G_DMA1", "G_DMA2", "HIF_1", "HIF_2", "SIM_1", "SIM_2", "IrDa Tx/Rx",
866 "UART_1 Tx", "UART_1 Rx", "UART_2 Tx", "UART_2 Rx", "UART_3 Tx", "UART_3 Rx", "UART_4 Tx", "UART_4 Rx"};
867
868 dma_running_status = readl(DMA_GLOBAL_RUNNING_STATUS);
869 for(i=0; i<15; i++)
870 {
871 if(((dma_running_status>>i) & 0x01) == 1)
872 {
873 printk("DMA %s is running\n", DMA_name[i]);
874 }
875 }
876 }
877
878 EXPORT_SYMBOL(mt_dma_running_status);
879
880 //arch_initcall(mt_init_dma);
881 late_initcall(mt_init_dma);