drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

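/*
 * Program and start one scatterlist entry.  Only the memory end of the
 * transfer (a post-incrementing EMIFF address) and the ES/EN/FN transfer
 * parameters change per entry; the device end was programmed once for
 * the whole descriptor in omap_dma_start_desc().
 */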
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

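/*
 * Take the next descriptor off the virtual channel and start it.  The
 * device end is programmed here with a constant address, since it does
 * not change across the descriptor's scatterlist entries.  Callers hold
 * c->vc.lock.
 */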
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

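/*
 * Completion callback from the legacy omap-dma layer.  For slave_sg
 * transfers this advances to the next scatterlist entry, or completes
 * the descriptor and starts the next one; for cyclic transfers it
 * fires the client's period callback instead.
 */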
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels. We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

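/*
 * Bytes remaining in a descriptor, given the current hardware position:
 * entries before the one containing 'addr' contribute nothing, the
 * containing entry contributes its tail starting at 'addr', and every
 * entry after it contributes its full size.
 */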
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

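/*
 * Report cookie status and, when the client asked for it, the residue:
 * the full descriptor size if the transfer is still queued, or the
 * bytes past the current hardware position if it is the one in flight.
 */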
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

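/*
 * Non-cyclic channels are not started here but queued on the device's
 * pending list; omap_dma_sched() then starts them from tasklet
 * (softirq) context.
 */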
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the
		 * DMA needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

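/*
 * Prepare a slave scatter/gather transfer.  The slave config selected
 * by direction supplies the device address, element size (bus width)
 * and burst length; each scatterlist entry becomes one frame-
 * synchronised transfer of 'burst' elements per frame.
 */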
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

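/*
 * Prepare a cyclic (e.g. audio) transfer.  The whole buffer is
 * described by a single sg entry with EN = elements per period and
 * FN = number of periods; linking the channel onto itself makes the
 * hardware wrap around, and the frame interrupt marks each period.
 */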
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);

		if (flags & DMA_PREP_INTERRUPT)
			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (dma_omap2plus()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

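/* 8-byte bus widths are rejected: the OMAP element size is at most 32 bits. */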
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it is, it will see that
	 * c->desc is NULL and exit).
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the DMA twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

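/*
 * Create one virtual channel per DMA request line.  The real DMA
 * channel is only requested when a client allocates the channel (see
 * omap_dma_alloc_chan_resources()).
 */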
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

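/*
 * Register one virtual channel per possible request line (0..126) and
 * then register the DMAengine device itself.
 */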
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
	},
};

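/*
 * Filter for dma_request_channel(): matches a channel belonging to this
 * driver whose request line equals *param.  A minimal usage sketch (the
 * request line number 17 below is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = 17;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */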
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");