/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include <plat/cpu.h>

#include "dmaengine.h"

#include <linux/exynos_ion.h>
#include <linux/smc.h>

#define MC_FC_SECURE_DMA	((uint32_t)(0x81000010))

struct ion_info {
	uint32_t base;
	size_t size;
};

static bool secure_dma_mode;
static struct ion_info secdma_mem_info;

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

enum pl330_reqtype {
	MEMTOMEM,
	MEMTODEV,
	DEVTOMEM,
	DEVTODEV,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define PCELL_ID		0xff0

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL		0xb105f00d

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))

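/*
 * Worked example of the burst math above (illustrative values, not
 * taken from any particular platform): a CCR whose source burst size
 * field encodes 4-byte beats (SRCBRSTSIZE = 2) and whose burst length
 * field encodes 8 beats (SRCBRSTLEN = 7) gives BRST_SIZE(ccr) = 4 and
 * BRST_LEN(ccr) = 8, so BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128
 * bursts, and BURST_TO_BYTE(128, ccr) maps back to 4096 bytes.
 */
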
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	do { \
		unsigned long timeout = jiffies + msecs_to_jiffies(5); \
		bool timeout_flag = true; \
		do { \
			if (!(_state(t) & (s))) { \
				timeout_flag = false; \
				break; \
			} \
			cpu_relax(); \
		} while (time_before(jiffies, timeout)); \
		if (timeout_flag) \
			pr_err("%s Timeout error!!!!", __func__); \
	} while (0)

#if defined(CONFIG_PL330TEST_LOG)
#define DBG_PRINT(x...) exynos_ss_printk(x)
#else
#define DBG_PRINT(x...) do {} while (0)
#endif

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

#define AUDSS_SRAM		0x03000000
#define AUDSS_SRAM_SIZE		0x00028000

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
	u32	pcell_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*client_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};

/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
	unsigned int infiniteloop;
	bool sram;
};

/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

/* A DMAC */
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* BUS address of MicroCode buffer in sram */
	u32			mcode_bus_sram;
	/* CPU address of MicroCode buffer in sram */
	void			*mcode_cpu_sram;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* count of open channels */
	int			usage_count;
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
	bool on_trigger;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

struct dma_pl330_filter_args {
	struct dma_pl330_dmac *pdmac;
	unsigned int chan_id;
};

static inline void put_unaligned_le32(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
	*p++ = val >> 24;
}

static inline u32 get_unaligned_le32(u8 *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
}
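
/*
 * Note: these local helpers appear to stand in for the generic
 * put_unaligned_le32()/get_unaligned_le32() accessors. The byte-wise
 * access matters here because microcode instructions are packed at
 * arbitrary byte offsets in the buffer, so the 32-bit immediates
 * (e.g. at &buf[2] in _emit_MOV below) are not naturally aligned.
 */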

static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_id(struct pl330_info *pi, u32 off)
{
	void __iomem *regs = pi->base;
	u32 id = 0;

	id |= (readb(regs + off + 0x0) << 0);
	id |= (readb(regs + off + 0x4) << 8);
	id |= (readb(regs + off + 0x8) << 16);
	id |= (readb(regs + off + 0xc) << 24);

	return id;
}
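
/*
 * The PrimeCell ID scheme spreads one byte of the ID across each of
 * four consecutive 32-bit registers, which is why get_id() gathers
 * byte 0 from offset 0x0, byte 1 from 0x4, and so on. For example,
 * reading PCELL_ID this way should reassemble PCELL_ID_VAL
 * (0xb105f00d) on a conformant implementation.
 */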

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline int _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline int _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline int _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline int _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;

	put_unaligned_le32(val, &buf[2]);

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
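
/*
 * Encoding sketch for the instruction above (illustrative bytes):
 * DMAMOV CCR, 0x00804201 is emitted as the 6-byte sequence
 * bc 01 01 42 80 00 - opcode 0xbc, then the destination register
 * index (CCR = 1), then the 32-bit immediate in little-endian order.
 */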

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline int _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	put_unaligned_le32(addr, &buf[2]);

	return SZ_DMAGO;
}

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			return false;

		cpu_relax();
	} while (time_before(jiffies, timeout));

	return true;
}

#ifndef CONFIG_ARM64
void set_secure_dma(void)
{
	int ret;

	ret = ion_exynos_contig_heap_info(ION_EXYNOS_ID_SECDMA,
			&secdma_mem_info.base, &secdma_mem_info.size);
	if (ret) {
		pr_err("get ion exynos info failed\n");
		return;
	}
	pr_err("[%s] ion base: 0x%x, size: 0x%zx\n", __func__,
			secdma_mem_info.base, secdma_mem_info.size);

	secure_dma_mode = true;
}
#endif

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct device_node *np = thrd->dmac->pinfo->dev->of_node;
	u32 val;
	int ret;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}

	if (soc_is_exynos5430() && secure_dma_mode) {
		if (np && of_dma_secure_mode(np)) {
			ret = exynos_smc(MC_FC_SECURE_DMA, val,
				*((u32 *)&insn[2]), secdma_mem_info.base);
			if (ret)
				dev_err(thrd->dmac->pinfo->dev, "dma smc failed\n");
			return;
		}
	}

	writel(val, regs + DBGINST0);

	val = get_unaligned_le32(&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED);

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED);

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = (pxs->r->cfg->brst_len == 1) ? SINGLE : BURST;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], cond, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = (pxs->r->cfg->brst_len == 1) ? SINGLE : BURST;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], cond, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case MEMTODEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DEVTOMEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case MEMTOMEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}

/* Returns bytes consumed */
static inline int _loop_infiniteloop(unsigned dry_run, u8 buf[],
		unsigned long bursts, const struct _xfer_spec *pxs, int ev)
{
	int cyc, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1, ljmpfe;
	struct _arg_LPEND lpend;

	off = 0;
	ljmpfe = off;
	lcnt0 = pxs->r->infiniteloop;

	if (bursts > 256) {
		lcnt1 = 256;
		cyc = bursts / 256;
	} else {
		lcnt1 = bursts;
		cyc = 1;
	}

	/* forever loop */
	off += _emit_MOV(dry_run, &buf[off], SAR, pxs->x->src_addr);
	off += _emit_MOV(dry_run, &buf[off], DAR, pxs->x->dst_addr);
	if (pxs->r->rqtype != MEMTOMEM)
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);

	/* loop0 */
	off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
	ljmp0 = off;

	/* loop1 */
	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;
	off += _bursts(dry_run, &buf[off], pxs, cyc);
	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	/* remainder */
	lcnt1 = bursts - (lcnt1 * cyc);

	if (lcnt1) {
		off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
		ljmp1 = off;
		off += _bursts(dry_run, &buf[off], pxs, 1);
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 1;
		lpend.bjump = off - ljmp1;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	off += _emit_SEV(dry_run, &buf[off], ev);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = off - ljmp0;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	lpend.cond = ALWAYS;
	lpend.forever = true;
	lpend.loop = 1;
	lpend.bjump = off - ljmpfe;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
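
/*
 * Worked example for _loop() (illustrative): a request of 70000 bursts
 * takes the *bursts >= 256*256 branch, so lcnt1 = lcnt0 = 256 and
 * cyc = 70000 / 256 / 256 = 1; this call then covers
 * 256 * 256 * 1 = 65536 bursts and the caller (_setup_loops below)
 * iterates again for the remaining 4464.
 */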

static inline int _setup_xfer_infiniteloop(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int ev)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	/* Setup Loop(s) */
	off += _loop_infiniteloop(dry_run, &buf[off], bursts, pxs, ev);

	return off;
}

static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
	if (pxs->r->rqtype != MEMTOMEM)
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_xfer *x;
	u8 *buf;
	int off = 0;
	unsigned mcbufsize = thrd->dmac->pinfo->mcbufsz;

	if (soc_is_exynos5422()) {
		if (pxs->r->sram) {
			req->mc_cpu = pl330->mcode_cpu_sram + thrd->id * mcbufsize +
				(mcbufsize / 2) * index;
			req->mc_bus = pl330->mcode_bus_sram + thrd->id * mcbufsize +
				(mcbufsize / 2) * index;
		} else {
			req->mc_cpu = pl330->mcode_cpu + thrd->id * mcbufsize +
				(mcbufsize / 2) * index;
			req->mc_bus = pl330->mcode_bus + thrd->id * mcbufsize +
				(mcbufsize / 2) * index;
		}
	}

	buf = req->mc_cpu;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	if (!pxs->r->infiniteloop) {
		do {
			/* Error if xfer length is not aligned at burst size */
			if (x->bytes % (BRST_SIZE(pxs->ccr) *
					BRST_LEN(pxs->ccr)))
				return -EINVAL;

			pxs->x = x;
			off += _setup_xfer(dry_run, &buf[off], pxs);

			x = x->next;
		} while (x);

		/* DMASEV peripheral/event */
		off += _emit_SEV(dry_run, &buf[off], thrd->ev);
		/* DMAEND */
		off += _emit_END(dry_run, &buf[off]);
	} else {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer_infiniteloop(dry_run, &buf[off],
						pxs, thrd->ev);
	}

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
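
/*
 * Worked CCR example (illustrative values): src_inc = dst_inc = 1,
 * brst_len = 8, brst_size = 2 (i.e. 4-byte beats), everything else 0:
 * CC_SRCINC (0x1) | CC_DSTINC (0x4000) | (7 << 4) | (7 << 18) |
 * (2 << 1) | (2 << 15) == 0x001d4075.
 */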

static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Invalid peripheral(%u)!\n",
			__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
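
/*
 * Minimal usage sketch for pl330_submit_req() (illustrative only; the
 * variable names and the MEMTOMEM setup below are assumptions, not
 * code from this driver):
 *
 *	struct pl330_xfer x = {
 *		.src_addr = src_dma,	// bus address of source
 *		.dst_addr = dst_dma,	// bus address of destination
 *		.bytes = len,		// multiple of burst size * length
 *		.next = NULL,		// single xfer in this req
 *	};
 *	struct pl330_req r = {
 *		.rqtype = MEMTOMEM,
 *		.x = &x,
 *		.xfer_cb = my_done_cb,	// called once, after all xfers
 *		.token = my_ctx,
 *	};
 *	ret = pl330_submit_req(ch_id, &r); // ch_id from pl330_request_channel()
 */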

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	if (!pl330->usage_count) {
		pr_info("[%s] Channel is already free!\n", __func__);
		spin_unlock_irqrestore(&pl330->lock, flags);
		return;
	}

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	if (!pl330->usage_count) {
		dev_err(pi->dev, "%s:%d event does not exist!\n",
			__func__, __LINE__);
		spin_unlock_irqrestore(&pl330->lock, flags);
		return 0;
	}

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				DBG_PRINT("[%s] Reset Channel-%d\t CS-%x FTC-%x\n",
					__func__, i, readl(regs + CS(i)),
					readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			if (id == -1) {
				DBG_PRINT("[%s] pl330_update id:%d\n",
					__func__, id);
				continue;
			}

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			rqdone = thrd->req[active].r;

			if (!rqdone->infiniteloop) {

				/* Detach the req */
				thrd->req[active].r = NULL;

				mark_free(thrd, active);

				/* Get going again ASAP */
				_start(thrd);
			}

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		if (soc_is_exynos5422() && pl330->pinfo->dev->of_node
			&& of_dma_get_mcode_addr(pl330->pinfo->dev->of_node)) {
			udelay(10);
		}

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				pl330->usage_count++;
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	void __iomem *regs = pi->base;
	u32 inten = readl(regs + INTEN);

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id) {
		pl330->events[ev] = -1;

		if (readl(regs + ES) & (1 << ev)) {
			if (!(inten & (1 << ev)))
				writel(inten | (1 << ev), regs + INTEN);
			writel(1 << ev, regs + INTCLR);
			writel(inten & ~(1 << ev), regs + INTEN);
		}
		pl330->usage_count--;
	}
}

static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
2234
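/*
 * Worked example (editorial note, not in the original source): with a
 * hypothetical CRD whose data-width field reads back as 3 and whose
 * buffer-depth field reads back as 15, the decode above gives
 *
 *	data_bus_width = 8 * (1 << 3) = 64 bits
 *	data_buf_dep   = 15 + 1      = 16 lines
 *
 * and a CR0 channel field of 7 gives num_chan = 7 + 1 = 8. The "+1"
 * adjustments reflect that the PL330 stores these counts as N-1.
 */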
2235static inline void _reset_thread(struct pl330_thread *thrd)
2236{
2237 struct pl330_dmac *pl330 = thrd->dmac;
2238 struct pl330_info *pi = pl330->pinfo;
2239
2240 thrd->req[0].mc_cpu = pl330->mcode_cpu
2241 + (thrd->id * pi->mcbufsz);
2242 thrd->req[0].mc_bus = pl330->mcode_bus
2243 + (thrd->id * pi->mcbufsz);
2244 thrd->req[0].r = NULL;
2245 mark_free(thrd, 0);
2246
2247 thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
2248 + pi->mcbufsz / 2;
2249 thrd->req[1].mc_bus = thrd->req[0].mc_bus
2250 + pi->mcbufsz / 2;
2251 thrd->req[1].r = NULL;
2252 mark_free(thrd, 1);
2253}
2254
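/*
 * Editorial worked example (not in the original source): each thread
 * owns one mcbufsz slice of the microcode buffer and halves it between
 * its two outstanding requests. Assuming the mainline default of
 * MCODE_BUFF_PER_REQ = 256 bytes, the fallback mcbufsz of
 * MCODE_BUFF_PER_REQ * 2 set in pl330_add() works out to
 *
 *	req[0].mc_cpu = mcode_cpu + id * 512
 *	req[1].mc_cpu = req[0].mc_cpu + 256
 */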
2255static int dmac_alloc_threads(struct pl330_dmac *pl330)
2256{
2257 struct pl330_info *pi = pl330->pinfo;
2258 int chans = pi->pcfg.num_chan;
2259 struct pl330_thread *thrd;
2260 int i;
2261
2262 /* Allocate 1 Manager and 'chans' Channel threads */
2263 pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
2264 GFP_KERNEL);
2265 if (!pl330->channels)
2266 return -ENOMEM;
2267
2268 /* Init Channel threads */
2269 for (i = 0; i < chans; i++) {
2270 thrd = &pl330->channels[i];
2271 thrd->id = i;
2272 thrd->dmac = pl330;
2273 _reset_thread(thrd);
2274
2275 /* Secure Channel */
2276 if (i == 0 && (soc_is_exynos5430() || soc_is_exynos5433() || soc_is_exynos7420()) &&
2277 pi->dev->of_node && of_dma_secure_mode(pi->dev->of_node)) {
2278 thrd->free = false;
2279 } else {
2280 thrd->free = true;
2281 }
2282 }
2283
2284 /* MANAGER is indexed at the end */
2285 thrd = &pl330->channels[chans];
2286 thrd->id = chans;
2287 thrd->dmac = pl330;
2288 thrd->free = false;
2289 pl330->manager = thrd;
2290
2291 return 0;
2292}
2293
2294static int dmac_alloc_resources(struct pl330_dmac *pl330)
2295{
2296 struct pl330_info *pi = pl330->pinfo;
2297 int chans = pi->pcfg.num_chan;
2298 int ret;
2299 dma_addr_t addr;
2300
2301 if (pi->dev->of_node) {
2302 addr = of_dma_get_mcode_addr(pi->dev->of_node);
2303 if (addr) {
2304 set_dma_ops(pi->dev, &arm_exynos_dma_mcode_ops);
2305 pl330->mcode_bus = addr;
2306 }
2307 }
2308
2309 /*
2310 * Alloc MicroCode buffer for 'chans' Channel threads.
2311 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
2312 */
2313 pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
2314 chans * pi->mcbufsz,
2315 &pl330->mcode_bus, GFP_KERNEL);
2316
2317 if (!pl330->mcode_cpu) {
2318 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
2319 __func__, __LINE__);
2320 return -ENOMEM;
2321 }
2322
2323 ret = dmac_alloc_threads(pl330);
2324 if (ret) {
2325 dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
2326 __func__, __LINE__);
2327 dma_free_coherent(pi->dev,
2328 chans * pi->mcbufsz,
2329 pl330->mcode_cpu, pl330->mcode_bus);
2330 return ret;
2331 }
2332
2333 return 0;
2334}
2335
2336static int pl330_add(struct pl330_info *pi)
2337{
2338 struct pl330_dmac *pl330;
2339 void __iomem *regs;
2340 int i, ret;
2341
2342 if (!pi || !pi->dev)
2343 return -EINVAL;
2344
2345 /* If already added */
2346 if (pi->pl330_data)
2347 return -EINVAL;
2348
2349 /*
2350 * If the SoC can perform reset on the DMAC, then do it
2351 * before reading its configuration.
2352 */
2353 if (pi->dmac_reset)
2354 pi->dmac_reset(pi);
2355
2356 regs = pi->base;
2357
2358 /* Check if we can handle this DMAC */
2359 if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
2360 || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
2361 dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
2362 get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
2363 return -EINVAL;
2364 }
2365
2366 /* Read the configuration of the DMAC */
2367 read_dmac_config(pi);
2368
2369 if (pi->pcfg.num_events == 0) {
2370 dev_err(pi->dev, "%s:%d Can't work without events!\n",
2371 __func__, __LINE__);
2372 return -EINVAL;
2373 }
2374
2375 pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
2376 if (!pl330) {
2377 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
2378 __func__, __LINE__);
2379 return -ENOMEM;
2380 }
2381
2382 /* Assign the info structure and private data */
2383 pl330->pinfo = pi;
2384 pi->pl330_data = pl330;
2385
2386 spin_lock_init(&pl330->lock);
2387
2388 INIT_LIST_HEAD(&pl330->req_done);
2389
2390 /* Use default MC buffer size if not provided */
2391 if (!pi->mcbufsz)
2392 pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
2393
2394 /* Mark all events as free */
2395 for (i = 0; i < pi->pcfg.num_events; i++)
2396 pl330->events[i] = -1;
2397
2398 /* Allocate resources needed by the DMAC */
2399 ret = dmac_alloc_resources(pl330);
2400 if (ret) {
2401 dev_err(pi->dev, "Unable to create channels for DMAC\n");
2402 kfree(pl330);
2403 return ret;
2404 }
2405
2406 tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
2407
2408 pl330->state = INIT;
2409 pl330->usage_count = 0;
2410
2411 return 0;
2412}
2413
2414static int dmac_free_threads(struct pl330_dmac *pl330)
2415{
2416 struct pl330_info *pi = pl330->pinfo;
2417 int chans = pi->pcfg.num_chan;
2418 struct pl330_thread *thrd;
2419 int i;
2420
2421 /* Release Channel threads */
2422 for (i = 0; i < chans; i++) {
2423 thrd = &pl330->channels[i];
2424 pl330_release_channel((void *)thrd);
2425 }
2426
2427 /* Free memory */
2428 kfree(pl330->channels);
2429
2430 return 0;
2431}
2432
2433static void dmac_free_resources(struct pl330_dmac *pl330)
2434{
2435 struct pl330_info *pi = pl330->pinfo;
2436 int chans = pi->pcfg.num_chan;
2437
2438 dmac_free_threads(pl330);
2439
2440 dma_free_coherent(pi->dev, chans * pi->mcbufsz,
2441 pl330->mcode_cpu, pl330->mcode_bus);
2442}
2443
2444static void pl330_del(struct pl330_info *pi)
2445{
2446 struct pl330_dmac *pl330;
2447
2448 if (!pi || !pi->pl330_data)
2449 return;
2450
2451 pl330 = pi->pl330_data;
2452
2453 pl330->state = UNINIT;
2454
2455 tasklet_kill(&pl330->tasks);
2456
2457 /* Free DMAC resources */
2458 dmac_free_resources(pl330);
2459
2460 kfree(pl330);
2461 pi->pl330_data = NULL;
2462}
2463
2464/* forward declaration */
2465static struct amba_driver pl330_driver;
2466
2467static inline struct dma_pl330_chan *
2468to_pchan(struct dma_chan *ch)
2469{
2470 if (!ch)
2471 return NULL;
2472
2473 return container_of(ch, struct dma_pl330_chan, chan);
2474}
2475
2476static inline struct dma_pl330_desc *
2477to_desc(struct dma_async_tx_descriptor *tx)
2478{
2479 return container_of(tx, struct dma_pl330_desc, txd);
2480}
2481
2482static inline void free_desc_list(struct list_head *list)
2483{
2484 struct dma_pl330_dmac *pdmac;
2485 struct dma_pl330_desc *desc;
2486 struct dma_pl330_chan *pch = NULL;
2487 unsigned long flags;
2488
2489 /* Finish off the work list */
2490 list_for_each_entry(desc, list, node) {
2491 dma_async_tx_callback callback;
2492 void *param;
2493
2494 /* All desc in a list belong to the same channel */
2495 pch = desc->pchan;
2496 callback = desc->txd.callback;
2497 param = desc->txd.callback_param;
2498
2499 if (callback)
2500 callback(param);
2501
2502 desc->pchan = NULL;
2503 }
2504
2505 /* pch will be unset if list was empty */
2506 if (!pch)
2507 return;
2508
2509 pdmac = pch->dmac;
2510
2511 spin_lock_irqsave(&pdmac->pool_lock, flags);
2512 list_splice_tail_init(list, &pdmac->desc_pool);
2513 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2514}
2515
2516static inline void handle_cyclic_desc_list(struct list_head *list)
2517{
2518 struct dma_pl330_desc *desc;
2519 struct dma_pl330_chan *pch = NULL;
2520 unsigned long flags;
2521
2522 list_for_each_entry(desc, list, node) {
2523 dma_async_tx_callback callback;
2524
2525 /* Change status to reload it */
2526 desc->status = PREP;
2527 pch = desc->pchan;
2528 callback = desc->txd.callback;
2529
2530 DBG_PRINT("[%s] before callback\n", __func__);
2531 if (callback && !pch->on_trigger)
2532 callback(desc->txd.callback_param);
2533
2534 if (pch->on_trigger)
2535 pch->on_trigger = 0;
2536 DBG_PRINT("[%s] after callback\n", __func__);
2537 }
2538
2539 /* pch will be unset if list was empty */
2540 if (!pch)
2541 return;
2542
2543 spin_lock_irqsave(&pch->lock, flags);
2544 list_splice_tail_init(list, &pch->work_list);
2545 spin_unlock_irqrestore(&pch->lock, flags);
2546}
2547
2548static inline void fill_queue(struct dma_pl330_chan *pch)
2549{
2550 struct dma_pl330_desc *desc;
2551 int ret;
2552
2553 list_for_each_entry(desc, &pch->work_list, node) {
2554
2555 /* If already submitted */
2556 if (desc->status == BUSY)
2557 continue;
2558
2559 ret = pl330_submit_req(pch->pl330_chid,
2560 &desc->req);
2561 if (!ret) {
2562 desc->status = BUSY;
2563 } else if (ret == -EAGAIN) {
2564 /* QFull or DMAC Dying */
2565 break;
2566 } else {
2567 /* Unacceptable request */
2568 desc->status = DONE;
2569 dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
2570 __func__, __LINE__, desc->txd.cookie);
2571 tasklet_schedule(&pch->task);
2572 }
2573 }
2574}
2575
2576static void pl330_tasklet(unsigned long data)
2577{
2578 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
2579 struct dma_pl330_desc *desc, *_dt;
2580 unsigned long flags;
2581 LIST_HEAD(list);
2582
2583 spin_lock_irqsave(&pch->lock, flags);
2584
2585 /* Pick up ripe tomatoes */
2586 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2587 if (desc->status == DONE) {
2588 if (!pch->cyclic)
2589 dma_cookie_complete(&desc->txd);
2590 list_move_tail(&desc->node, &list);
2591 }
2592
2593 /* Try to submit a req immediately after the last completed cookie */
2594 fill_queue(pch);
2595
2596 /* Make sure the PL330 Channel thread is active */
2597 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
2598
2599 spin_unlock_irqrestore(&pch->lock, flags);
2600
2601 if (pch->cyclic)
2602 handle_cyclic_desc_list(&list);
2603 else
2604 free_desc_list(&list);
2605}
2606
2607static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
2608{
2609 struct dma_pl330_desc *desc = token;
2610 struct dma_pl330_chan *pch = desc->pchan;
2611 struct dma_pl330_dmac *pdmac = NULL;
2612 unsigned long flags;
2613
2614 /* If desc aborted */
2615 if (!pch)
2616 return;
2617
2618 pdmac = pch->dmac;
2619
2620 spin_lock_irqsave(&pdmac->pool_lock, flags);
2621
2622 desc->status = DONE;
2623
2624 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2625
2626 if (desc->req.infiniteloop)
2627 pl330_tasklet((unsigned long)pch);
2628 else
2629 tasklet_schedule(&pch->task);
2630}
2631
2632static bool pl330_dt_filter(struct dma_chan *chan, void *param)
2633{
2634 struct dma_pl330_filter_args *fargs = param;
2635
2636 if (chan->device != &fargs->pdmac->ddma)
2637 return false;
2638
2639 return (chan->chan_id == fargs->chan_id);
2640}
2641
2642bool pl330_filter(struct dma_chan *chan, void *param)
2643{
2644 u8 *peri_id;
2645
2646 if (chan->device->dev->driver != &pl330_driver.drv)
2647 return false;
2648
2649 peri_id = chan->private;
2650 return *peri_id == (unsigned long)param;
2651}
2652EXPORT_SYMBOL(pl330_filter);
2653
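/*
 * Illustrative client-side use of pl330_filter() (a sketch, not from
 * this file): a slave driver on a non-DT platform picks the channel
 * wired to its peripheral request line. 'peri_id' is a hypothetical
 * platform-provided request number.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 *	if (!chan)
 *		return -ENODEV;
 */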
2654static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
2655 struct of_dma *ofdma)
2656{
2657 int count = dma_spec->args_count;
2658 struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
2659 struct dma_pl330_filter_args fargs;
2660 dma_cap_mask_t cap;
2661
2662 if (!pdmac)
2663 return NULL;
2664
2665 if (count != 1)
2666 return NULL;
2667
2668 fargs.pdmac = pdmac;
2669 fargs.chan_id = dma_spec->args[0];
2670
2671 dma_cap_zero(cap);
2672 dma_cap_set(DMA_SLAVE, cap);
2673 dma_cap_set(DMA_CYCLIC, cap);
2674
2675 return dma_request_channel(cap, pl330_dt_filter, &fargs);
2676}
2677
2678static int pl330_alloc_chan_resources(struct dma_chan *chan)
2679{
2680 struct dma_pl330_chan *pch = to_pchan(chan);
2681 struct dma_pl330_dmac *pdmac = pch->dmac;
2682 unsigned long flags;
2683
2684#ifdef CONFIG_PM_RUNTIME
2685 pm_runtime_get_sync(pdmac->pif.dev);
2686#endif
2687
2688 spin_lock_irqsave(&pch->lock, flags);
2689
2690 dma_cookie_init(chan);
2691 pch->cyclic = false;
2692
2693 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
2694 if (!pch->pl330_chid) {
2695 spin_unlock_irqrestore(&pch->lock, flags);
2696 return -ENOMEM;
2697 }
2698
2699 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2700
2701 spin_unlock_irqrestore(&pch->lock, flags);
2702
2703 return 1;
2704}
2705
2706static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
2707{
2708 struct dma_pl330_chan *pch = to_pchan(chan);
2709 struct dma_pl330_desc *desc, *_dt;
2710 unsigned long flags;
2711 struct dma_pl330_dmac *pdmac = pch->dmac;
2712 struct dma_slave_config *slave_config;
2713 LIST_HEAD(list);
2714
2715 switch (cmd) {
2716 case DMA_TERMINATE_ALL:
2717 spin_lock_irqsave(&pch->lock, flags);
2718
2719 /* FLUSH the PL330 Channel thread */
2720 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
2721
2722 /* Mark all desc done */
2723 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
2724 desc->status = DONE;
2725 list_move_tail(&desc->node, &list);
2726 }
2727
2728 list_splice_tail_init(&list, &pdmac->desc_pool);
2729 spin_unlock_irqrestore(&pch->lock, flags);
2730 break;
2731 case DMA_SLAVE_CONFIG:
2732 slave_config = (struct dma_slave_config *)arg;
2733
2734 if (slave_config->direction == DMA_MEM_TO_DEV) {
2735 if (slave_config->dst_addr)
2736 pch->fifo_addr = slave_config->dst_addr;
2737 if (slave_config->dst_addr_width)
2738 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2739 if (slave_config->dst_maxburst)
2740 pch->burst_len = slave_config->dst_maxburst;
2741 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2742 if (slave_config->src_addr)
2743 pch->fifo_addr = slave_config->src_addr;
2744 if (slave_config->src_addr_width)
2745 pch->burst_sz = __ffs(slave_config->src_addr_width);
2746 if (slave_config->src_maxburst)
2747 pch->burst_len = slave_config->src_maxburst;
2748 }
2749 break;
2750 default:
2751 dev_err(pch->dmac->pif.dev, "Unsupported command.\n");
2752 return -ENXIO;
2753 }
2754
2755 return 0;
2756}
2757
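/*
 * Sketch of the client half of the DMA_SLAVE_CONFIG case above
 * (illustrative, not from this file); 'fifo' stands in for a real
 * peripheral's FIFO bus address.
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 1,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */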
2758static void pl330_free_chan_resources(struct dma_chan *chan)
2759{
2760 struct dma_pl330_chan *pch = to_pchan(chan);
2761 unsigned long flags;
2762
2763 tasklet_kill(&pch->task);
2764
2765 spin_lock_irqsave(&pch->lock, flags);
2766
2767 pl330_release_channel(pch->pl330_chid);
2768 pch->pl330_chid = NULL;
2769
2770 if (pch->cyclic)
2771 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2772
2773 spin_unlock_irqrestore(&pch->lock, flags);
2774
2775#ifdef CONFIG_PM_RUNTIME
2776 pm_runtime_put_sync(pch->dmac->pif.dev);
2777#endif
2778}
2779
2780static enum dma_status
2781pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2782 struct dma_tx_state *txstate)
2783{
2784 return dma_cookie_status(chan, cookie, txstate);
2785}
2786
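/*
 * Client-side polling sketch for the status hook above (illustrative;
 * 'cookie' is the value returned by dmaengine_submit()):
 *
 *	enum dma_status st;
 *
 *	st = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (st == DMA_SUCCESS)
 *		... the transfer has finished ...
 */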
2787static void pl330_issue_pending(struct dma_chan *chan)
2788{
2789 struct dma_pl330_chan *ch = to_pchan(chan);
2790 ch->on_trigger = 1;
2791 pl330_tasklet((unsigned long) to_pchan(chan));
2792}
2793
2794/*
2795 * We returned the last one of the circular list of descriptor(s)
2796 * from prep_xxx, so the argument to submit corresponds to the last
2797 * descriptor of the list.
2798 */
2799static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2800{
2801 struct dma_pl330_desc *desc, *last = to_desc(tx);
2802 struct dma_pl330_chan *pch = to_pchan(tx->chan);
2803 dma_cookie_t cookie;
2804 unsigned long flags;
2805
2806 spin_lock_irqsave(&pch->lock, flags);
2807
2808 /* Assign cookies to all nodes */
2809 while (!list_empty(&last->node)) {
2810 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2811 if (pch->cyclic) {
2812 desc->txd.callback = last->txd.callback;
2813 desc->txd.callback_param = last->txd.callback_param;
2814 }
2815
2816 dma_cookie_assign(&desc->txd);
2817
2818 list_move_tail(&desc->node, &pch->work_list);
2819 }
2820
2821 cookie = dma_cookie_assign(&last->txd);
2822 list_add_tail(&last->node, &pch->work_list);
2823 spin_unlock_irqrestore(&pch->lock, flags);
2824
2825 return cookie;
2826}
2827
2828static inline void _init_desc(struct dma_pl330_desc *desc)
2829{
2830 desc->pchan = NULL;
2831 desc->req.x = &desc->px;
2832 desc->req.token = desc;
2833 desc->rqcfg.swap = SWAP_NO;
2834 desc->rqcfg.privileged = 0;
2835 desc->rqcfg.insnaccess = 0;
2836 desc->rqcfg.scctl = SCCTRL0;
2837 desc->rqcfg.dcctl = DCCTRL0;
2838 desc->req.cfg = &desc->rqcfg;
2839 desc->req.xfer_cb = dma_pl330_rqcb;
2840 desc->txd.tx_submit = pl330_tx_submit;
2841
2842 INIT_LIST_HEAD(&desc->node);
2843}
2844
2845/* Returns the number of descriptors added to the DMAC pool */
2846static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2847{
2848 struct dma_pl330_desc *desc;
2849 unsigned long flags;
2850 int i;
2851
2852 if (!pdmac)
2853 return 0;
2854
2855 desc = kzalloc(count * sizeof(*desc), flg);
2856 if (!desc)
2857 return 0;
2858
2859 spin_lock_irqsave(&pdmac->pool_lock, flags);
2860
2861 for (i = 0; i < count; i++) {
2862 _init_desc(&desc[i]);
2863 list_add_tail(&desc[i].node, &pdmac->desc_pool);
2864 }
2865
2866 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2867
2868 return count;
2869}
2870
2871static struct dma_pl330_desc *
2872pluck_desc(struct dma_pl330_dmac *pdmac)
2873{
2874 struct dma_pl330_desc *desc = NULL;
2875 unsigned long flags;
2876
2877 if (!pdmac)
2878 return NULL;
2879
2880 spin_lock_irqsave(&pdmac->pool_lock, flags);
2881
2882 if (!list_empty(&pdmac->desc_pool)) {
2883 desc = list_entry(pdmac->desc_pool.next,
2884 struct dma_pl330_desc, node);
2885
2886 list_del_init(&desc->node);
2887
2888 desc->status = PREP;
2889 desc->txd.callback = NULL;
2890 }
2891
2892 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2893
2894 return desc;
2895}
2896
2897static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
2898{
2899 struct dma_pl330_dmac *pdmac = pch->dmac;
2900 u8 *peri_id = pch->chan.private;
2901 struct dma_pl330_desc *desc;
2902
2903 /* Pluck one desc from the pool of DMAC */
2904 desc = pluck_desc(pdmac);
2905
2906 /* If the DMAC pool is empty, alloc new */
2907 if (!desc) {
2908 if (!add_desc(pdmac, GFP_ATOMIC, 1))
2909 return NULL;
2910
2911 /* Try again */
2912 desc = pluck_desc(pdmac);
2913 if (!desc) {
2914 dev_err(pch->dmac->pif.dev,
2915 "%s:%d ALERT!\n", __func__, __LINE__);
2916 return NULL;
2917 }
2918 }
2919
2920 /* Initialize the descriptor */
2921 desc->pchan = pch;
2922 desc->txd.cookie = 0;
2923 async_tx_ack(&desc->txd);
2924
2925 desc->req.infiniteloop = 0;
2926 desc->req.peri = peri_id ? pch->chan.chan_id : 0;
2927 desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
2928
2929 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2930
2931 return desc;
2932}
2933
2934static inline void fill_px(struct pl330_xfer *px,
2935 dma_addr_t dst, dma_addr_t src, size_t len)
2936{
2937 px->next = NULL;
2938 px->bytes = len;
2939 px->dst_addr = dst;
2940 px->src_addr = src;
2941}
2942
2943static struct dma_pl330_desc *
2944__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
2945 dma_addr_t src, size_t len)
2946{
2947 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2948
2949 if (!desc) {
2950 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2951 __func__, __LINE__);
2952 return NULL;
2953 }
2954
2955 /*
2956 * Ideally we should look out for reqs bigger than
2957 * those that can be programmed with 256 bytes of
2958 * MC buffer, but considering a req size is seldom
2959 * going to be word-unaligned and more than 200MB,
2960 * we take it easy.
2961 * Also, should the limit be reached we'd rather
2962 * have the platform increase the MC buffer size than
2963 * complicate this API driver.
2964 */
2965 fill_px(&desc->px, dst, src, len);
2966
2967 return desc;
2968}
2969
2970/* Call after fixing burst size */
2971static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2972{
2973 struct dma_pl330_chan *pch = desc->pchan;
2974 struct pl330_info *pi = &pch->dmac->pif;
2975 int burst_len;
2976
2977 burst_len = pi->pcfg.data_bus_width / 8;
2978 burst_len *= pi->pcfg.data_buf_dep;
2979 burst_len >>= desc->rqcfg.brst_size;
2980
2981 /* src/dst_burst_len can't be more than 16 */
2982 if (soc_is_exynos5422() && burst_len > 8)
2983 burst_len = 8;
2984 else if (burst_len > 16)
2985 burst_len = 16;
2986
2987 while (burst_len > 1) {
2988 if (!(len % (burst_len << desc->rqcfg.brst_size)))
2989 break;
2990 burst_len--;
2991 }
2992
2993 return burst_len;
2994}
2995
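/*
 * Worked example (editorial, not in the original source): on a DMAC
 * with a 64-bit data bus and a 16-line data buffer, with brst_size = 2
 * (4-byte beats), the code above starts from
 *
 *	burst_len = (64 / 8) * 16 >> 2 = 32
 *
 * clamps it to the architectural maximum of 16 (8 on Exynos5422), then
 * walks it down until 'len' divides into whole bursts.
 */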
2996static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2997 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
2998 size_t period_len, enum dma_transfer_direction direction,
2999 unsigned long flags, void *context)
3000{
3001 struct dma_pl330_desc *desc = NULL, *first = NULL;
3002 struct dma_pl330_chan *pch = to_pchan(chan);
3003 struct dma_pl330_dmac *pdmac = pch->dmac;
3004 unsigned int i;
3005 dma_addr_t dst;
3006 dma_addr_t src;
3007 unsigned int *infinite = context;
3008
3009 if (len % period_len != 0)
3010 return NULL;
3011
3012 if (!is_slave_direction(direction)) {
3013 dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
3014 __func__, __LINE__);
3015 return NULL;
3016 }
3017
3018 for (i = 0; i < len / period_len; i++) {
3019 desc = pl330_get_desc(pch);
3020 if (!desc) {
3021 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
3022 __func__, __LINE__);
3023
3024 if (!first)
3025 return NULL;
3026
3027 spin_lock_irqsave(&pdmac->pool_lock, flags);
3028
3029 while (!list_empty(&first->node)) {
3030 desc = list_entry(first->node.next,
3031 struct dma_pl330_desc, node);
3032 list_move_tail(&desc->node, &pdmac->desc_pool);
3033 }
3034
3035 list_move_tail(&first->node, &pdmac->desc_pool);
3036
3037 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
3038
3039 return NULL;
3040 }
3041
3042 switch (direction) {
3043 case DMA_MEM_TO_DEV:
3044 desc->rqcfg.src_inc = 1;
3045 desc->rqcfg.dst_inc = 0;
3046 desc->req.rqtype = MEMTODEV;
3047 src = dma_addr;
3048 dst = pch->fifo_addr;
3049 break;
3050 case DMA_DEV_TO_MEM:
3051 desc->rqcfg.src_inc = 0;
3052 desc->rqcfg.dst_inc = 1;
3053 desc->req.rqtype = DEVTOMEM;
3054 src = pch->fifo_addr;
3055 dst = dma_addr;
3056 break;
3057 default:
3058 break;
3059 }
3060
3061 desc->rqcfg.brst_size = pch->burst_sz;
3062 desc->rqcfg.brst_len = 1;
3063 desc->req.infiniteloop = *infinite;
3064
3065 if (soc_is_exynos5422()) {
3066 if (dma_addr >= AUDSS_SRAM &&
3067 dma_addr < (AUDSS_SRAM + AUDSS_SRAM_SIZE))
3068 desc->req.sram = true;
3069 else
3070 desc->req.sram = false;
3071 }
3072
3073 fill_px(&desc->px, dst, src, period_len);
3074
3075 if (!first)
3076 first = desc;
3077 else
3078 list_add_tail(&desc->node, &first->node);
3079
3080 dma_addr += period_len;
3081 }
3082
3083 if (!desc)
3084 return NULL;
3085
3086 pch->cyclic = true;
3087 desc->txd.flags = flags;
3088
3089 return &desc->txd;
3090}
3091
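/*
 * Illustrative caller of the cyclic prep above (a sketch relying on
 * this tree's convention that 'context' points at an 'infinite' flag;
 * 'buf', 'buf_len' and 'period_len' are assumed values):
 *
 *	unsigned int infinite = 0;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *			period_len, DMA_DEV_TO_MEM, 0, &infinite);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */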
3092static struct dma_async_tx_descriptor *
3093pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
3094 dma_addr_t src, size_t len, unsigned long flags)
3095{
3096 struct dma_pl330_desc *desc;
3097 struct dma_pl330_chan *pch = to_pchan(chan);
3098 struct pl330_info *pi;
3099 int burst;
3100
3101 if (unlikely(!pch || !len))
3102 return NULL;
3103
3104 pi = &pch->dmac->pif;
3105
3106 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
3107 if (!desc)
3108 return NULL;
3109
3110 desc->rqcfg.src_inc = 1;
3111 desc->rqcfg.dst_inc = 1;
3112 desc->req.rqtype = MEMTOMEM;
3113
3114 /* Select max possible burst size */
3115 burst = pi->pcfg.data_bus_width / 8;
3116
3117 while (burst > 1) {
3118 if (!(len % burst))
3119 break;
3120 burst /= 2;
3121 }
3122
3123 desc->rqcfg.brst_size = 0;
3124 while (burst != (1 << desc->rqcfg.brst_size))
3125 desc->rqcfg.brst_size++;
3126
3127 desc->rqcfg.brst_len = get_burst_len(desc, len);
3128
3129 desc->txd.flags = flags;
3130
3131 return &desc->txd;
3132}
3133
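/*
 * Illustrative memcpy submission against the prep above (sketch; 'dst',
 * 'src' and 'len' are assumed to be DMA-mapped already). The call goes
 * through the device hook, matching the signature in this file:
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						    DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */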
3134static struct dma_async_tx_descriptor *
3135pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3136 unsigned int sg_len, enum dma_transfer_direction direction,
3137 unsigned long flg, void *context)
3138{
3139 struct dma_pl330_desc *first, *desc = NULL;
3140 struct dma_pl330_chan *pch = to_pchan(chan);
3141 struct scatterlist *sg;
3142 unsigned long flags;
3143 int i;
3144 dma_addr_t addr;
3145
3146 if (unlikely(!pch || !sgl || !sg_len))
3147 return NULL;
3148
3149 addr = pch->fifo_addr;
3150
3151 first = NULL;
3152
3153 for_each_sg(sgl, sg, sg_len, i) {
3154
3155 desc = pl330_get_desc(pch);
3156 if (!desc) {
3157 struct dma_pl330_dmac *pdmac = pch->dmac;
3158
3159 dev_err(pch->dmac->pif.dev,
3160 "%s:%d Unable to fetch desc\n",
3161 __func__, __LINE__);
3162 if (!first)
3163 return NULL;
3164
3165 spin_lock_irqsave(&pdmac->pool_lock, flags);
3166
3167 while (!list_empty(&first->node)) {
3168 desc = list_entry(first->node.next,
3169 struct dma_pl330_desc, node);
3170 list_move_tail(&desc->node, &pdmac->desc_pool);
3171 }
3172
3173 list_move_tail(&first->node, &pdmac->desc_pool);
3174
3175 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
3176
3177 return NULL;
3178 }
3179
3180 if (!first)
3181 first = desc;
3182 else
3183 list_add_tail(&desc->node, &first->node);
3184
3185 if (direction == DMA_MEM_TO_DEV) {
3186 desc->rqcfg.src_inc = 1;
3187 desc->rqcfg.dst_inc = 0;
3188 desc->req.rqtype = MEMTODEV;
3189 fill_px(&desc->px,
3190 addr, sg_dma_address(sg), sg_dma_len(sg));
3191 } else {
3192 desc->rqcfg.src_inc = 0;
3193 desc->rqcfg.dst_inc = 1;
3194 desc->req.rqtype = DEVTOMEM;
3195 fill_px(&desc->px,
3196 sg_dma_address(sg), addr, sg_dma_len(sg));
3197 }
3198
3199 desc->rqcfg.brst_size = pch->burst_sz;
3200 desc->rqcfg.brst_len = pch->burst_len;
3201 }
3202
3203 /* Return the last desc in the chain */
3204 desc->txd.flags = flg;
3205 return &desc->txd;
3206}
3207
3208static irqreturn_t pl330_irq_handler(int irq, void *data)
3209{
3210#if defined(CONFIG_PL330TEST_LOG)
3211 struct pl330_info *pi = data;
3212#endif
3213
3214 DBG_PRINT("[%s] devname:%s\n", __func__, dev_name(pi->dev));
3215 if (pl330_update(data)) {
3216 DBG_PRINT("[%s] irq_handler exit\n", __func__);
3217 return IRQ_HANDLED;
3218 } else {
3219 DBG_PRINT("[%s] IRQ_NONE\n", __func__);
3220 return IRQ_NONE;
3221 }
3222}
3223
3224int pl330_dma_getposition(struct dma_chan *chan,
3225 dma_addr_t *src, dma_addr_t *dst)
3226{
3227 struct dma_pl330_chan *pch = to_pchan(chan);
3228 struct pl330_info *pi;
3229 void __iomem *regs;
3230 struct pl330_thread *thrd;
3231
3232 if (unlikely(!pch))
3233 return -EINVAL;
3234
3235 thrd = pch->pl330_chid;
3236 pi = &pch->dmac->pif;
3237 regs = pi->base;
3238
3239 *src = readl(regs + SA(thrd->id));
3240 *dst = readl(regs + DA(thrd->id));
3241
3242 return 0;
3243}
3244EXPORT_SYMBOL(pl330_dma_getposition);
3245
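/*
 * Illustrative use of the export above (sketch, not from this file): a
 * client such as an audio driver can derive its current hardware
 * pointer from the live source/destination addresses;
 * 'runtime_buffer_base' is a hypothetical name.
 *
 *	dma_addr_t src, dst;
 *
 *	if (!pl330_dma_getposition(chan, &src, &dst))
 *		pos = dst - runtime_buffer_base;
 */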
3246static int pl330_fixup_ctrl(struct device *dev)
3247{
3248 if (soc_is_exynos7420()) {
3249 void __iomem *base;
3250
3251 base = ioremap(0x10ef0000, SZ_32);
3252 writel(0, base);
3253
3254 iounmap(base);
3255 }
3256 return 0;
3257}
3258#ifdef CONFIG_PM
3259static const struct dev_pm_ops pl330_pm_ops = {
3260 .resume = pl330_fixup_ctrl,
3261};
3262
3263#define PL330_PM (&pl330_pm_ops)
3264
3265#else /* CONFIG_PM */
3266
3267#define PL330_PM NULL
3268
3269#endif /* !CONFIG_PM */
3270
3271static int
3272pl330_probe(struct amba_device *adev, const struct amba_id *id)
3273{
3274 struct dma_pl330_platdata *pdat;
3275 struct dma_pl330_dmac *pdmac;
3276 struct dma_pl330_chan *pch, *_p;
3277 struct pl330_info *pi;
3278 struct dma_device *pd;
3279 struct resource *res;
3280 int i, ret, irq;
3281 int num_chan;
3282
3283 pdat = adev->dev.platform_data;
3284
3285 /* Allocate a new DMAC and its Channels */
3286 pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
3287 if (!pdmac) {
3288 dev_err(&adev->dev, "unable to allocate mem\n");
3289 return -ENOMEM;
3290 }
3291
3292 pi = &pdmac->pif;
3293 pi->dev = &adev->dev;
3294 pi->pl330_data = NULL;
3295 pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
3296
3297 res = &adev->res;
3298 pi->base = devm_ioremap_resource(&adev->dev, res);
3299 if (IS_ERR(pi->base))
3300 return PTR_ERR(pi->base);
3301
3302 amba_set_drvdata(adev, pdmac);
3303
3304 irq = adev->irq[0];
3305 ret = request_irq(irq, pl330_irq_handler, 0,
3306 dev_name(&adev->dev), pi);
3307 if (ret)
3308 return ret;
3309
3310 ret = pl330_add(pi);
3311 if (ret)
3312 goto probe_err1;
3313
3314 INIT_LIST_HEAD(&pdmac->desc_pool);
3315 spin_lock_init(&pdmac->pool_lock);
3316
3317 /* Create a descriptor pool of default size */
3318 if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
3319 dev_warn(&adev->dev, "unable to allocate desc\n");
3320
3321 pd = &pdmac->ddma;
3322 INIT_LIST_HEAD(&pd->channels);
3323
3324 /* Initialize channel parameters */
3325 if (pdat)
3326 num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
3327 else
3328 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
3329
3330 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
3331 if (!pdmac->peripherals) {
3332 ret = -ENOMEM;
3333 dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
3334 goto probe_err2;
3335 }
3336
3337 for (i = 0; i < num_chan; i++) {
3338 pch = &pdmac->peripherals[i];
3339 if (!adev->dev.of_node)
3340 pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
3341 else
3342 pch->chan.private = adev->dev.of_node;
3343
3344 INIT_LIST_HEAD(&pch->work_list);
3345 spin_lock_init(&pch->lock);
3346 pch->pl330_chid = NULL;
3347 pch->chan.device = pd;
3348 pch->dmac = pdmac;
3349
3350 /* Add the channel to the DMAC list */
3351 list_add_tail(&pch->chan.device_node, &pd->channels);
3352 }
3353
3354 pd->dev = &adev->dev;
3355 if (pdat) {
3356 pd->cap_mask = pdat->cap_mask;
3357 } else {
3358 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
3359 if (pi->pcfg.num_peri) {
3360 dma_cap_set(DMA_SLAVE, pd->cap_mask);
3361 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
3362 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
3363 }
3364 }
3365
3366 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
3367 pd->device_free_chan_resources = pl330_free_chan_resources;
3368 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
3369 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
3370 pd->device_tx_status = pl330_tx_status;
3371 pd->device_prep_slave_sg = pl330_prep_slave_sg;
3372 pd->device_control = pl330_control;
3373 pd->device_issue_pending = pl330_issue_pending;
3374
3375 ret = dma_async_device_register(pd);
3376 if (ret) {
3377 dev_err(&adev->dev, "unable to register DMAC\n");
3378 goto probe_err3;
3379 }
3380
3381 if (adev->dev.of_node) {
3382 ret = of_dma_controller_register(adev->dev.of_node,
3383 of_dma_pl330_xlate, pdmac);
3384 if (ret) {
3385 dev_err(&adev->dev,
3386 "unable to register DMA to the generic DT DMA helpers\n");
3387 }
3388 }
3389 pl330_fixup_ctrl(&adev->dev);
3390
3391 dev_info(&adev->dev,
3392 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
3393 dev_info(&adev->dev,
3394 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
3395 pi->pcfg.data_buf_dep,
3396 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
3397 pi->pcfg.num_peri, pi->pcfg.num_events);
3398
3399#ifdef CONFIG_PM_RUNTIME
3400 pm_runtime_put_sync(&adev->dev);
3401#endif
3402
3403 return 0;
3404probe_err3:
3405 amba_set_drvdata(adev, NULL);
3406
3407 /* Idle the DMAC */
3408 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
3409 chan.device_node) {
3410
3411 /* Remove the channel */
3412 list_del(&pch->chan.device_node);
3413
3414 /* Flush the channel */
3415 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
3416 pl330_free_chan_resources(&pch->chan);
3417 }
3418probe_err2:
3419 pl330_del(pi);
3420probe_err1:
3421 free_irq(irq, pi);
3422
3423 return ret;
3424}
3425
3426static int pl330_remove(struct amba_device *adev)
3427{
3428 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
3429 struct dma_pl330_chan *pch, *_p;
3430 struct pl330_info *pi;
3431 int irq;
3432
3433 if (!pdmac)
3434 return 0;
3435
3436 if (adev->dev.of_node)
3437 of_dma_controller_free(adev->dev.of_node);
3438
3439 dma_async_device_unregister(&pdmac->ddma);
3440 amba_set_drvdata(adev, NULL);
3441
3442 /* Idle the DMAC */
3443 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
3444 chan.device_node) {
3445
3446 /* Remove the channel */
3447 list_del(&pch->chan.device_node);
3448
3449 /* Flush the channel */
3450 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
3451 pl330_free_chan_resources(&pch->chan);
3452 }
3453
3454 pi = &pdmac->pif;
3455
3456 pl330_del(pi);
3457
3458 irq = adev->irq[0];
3459 free_irq(irq, pi);
3460
3461 return 0;
3462}
3463
3464static struct amba_id pl330_ids[] = {
3465 {
3466 .id = 0x00041330,
3467 .mask = 0x000fffff,
3468 },
3469 { 0, 0 },
3470};
3471
3472MODULE_DEVICE_TABLE(amba, pl330_ids);
3473
3474static struct amba_driver pl330_driver = {
3475 .drv = {
3476 .owner = THIS_MODULE,
3477 .pm = PL330_PM,
3478 .name = "dma-pl330",
3479 },
3480 .id_table = pl330_ids,
3481 .probe = pl330_probe,
3482 .remove = pl330_remove,
3483};
3484
3485module_amba_driver(pl330_driver);
3486
3487MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
3488MODULE_DESCRIPTION("API Driver for PL330 DMAC");
3489MODULE_LICENSE("GPL");