Linux-2.6.12-rc2: arch/arm26/kernel/dma.c
/*
 * linux/arch/arm26/kernel/dma.c
 *
 * Copyright (C) 1995-2000 Russell King
 *               2003      Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Front-end to the DMA handling.  This handles the allocation/freeing
 * of DMA channels, and provides a unified interface to the machine's
 * DMA facilities.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#include <asm/dma.h>

DEFINE_SPINLOCK(dma_spin_lock);

static dma_t dma_chan[MAX_DMA_CHANNELS];

/*
 * Get dma list for /proc/dma
 */
int get_dma_list(char *buf)
{
        dma_t *dma;
        char *p = buf;
        int i;

        for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++)
                if (dma->lock)
                        p += sprintf(p, "%2d: %14s %s\n", i,
                                     dma->d_ops->type, dma->device_id);

        return p - buf;
}

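/*
 * Illustrative note (not part of the original source): given the sprintf()
 * format above, each claimed channel appears in /proc/dma as one line of
 * the form
 *
 *     0:    <d_ops->type> <device_id>
 *
 * where the type string comes from the controller's d_ops and the device
 * name is whatever was passed to request_dma() below.
 */
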
/*
 * Request DMA channel
 *
 * On certain platforms, we have to allocate an interrupt as well...
 */
int request_dma(dmach_t channel, const char *device_id)
{
        dma_t *dma = dma_chan + channel;
        int ret;

        if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
                goto bad_dma;

        if (xchg(&dma->lock, 1) != 0)
                goto busy;

        dma->device_id = device_id;
        dma->active = 0;
        dma->invalid = 1;

        ret = 0;
        if (dma->d_ops->request)
                ret = dma->d_ops->request(channel, dma);

        if (ret)
                xchg(&dma->lock, 0);

        return ret;

bad_dma:
        printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
        return -EINVAL;

busy:
        return -EBUSY;
}

/*
 * Free DMA channel
 *
 * On certain platforms, we have to free the interrupt as well...
 */
void free_dma(dmach_t channel)
{
        dma_t *dma = dma_chan + channel;

        if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
                goto bad_dma;

        if (dma->active) {
                printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
                dma->d_ops->disable(channel, dma);
                dma->active = 0;
        }

        if (xchg(&dma->lock, 0) != 0) {
                if (dma->d_ops->free)
                        dma->d_ops->free(channel, dma);
                return;
        }

        printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
        return;

bad_dma:
        printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
}

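/*
 * Illustrative sketch (not part of the original source): a typical driver
 * sequence for claiming and releasing a channel through this front-end.
 * The channel number and device name are hypothetical.
 */
#if 0
static int example_claim_channel(void)
{
        int err;

        err = request_dma(0, "example-device");  /* -EBUSY if already taken */
        if (err)
                return err;

        /* ... program and run transfers here ... */

        free_dma(0);                             /* release the channel */
        return 0;
}
#endif
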
/* Set DMA Scatter-Gather list
 */
void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
{
        dma_t *dma = dma_chan + channel;

        if (dma->active)
                printk(KERN_ERR "dma%d: altering DMA SG while "
                       "DMA active\n", channel);

        dma->sg = sg;
        dma->sgcount = nr_sg;
        dma->using_sg = 1;
        dma->invalid = 1;
}

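/*
 * Illustrative sketch (not part of the original source): handing an already
 * built scatterlist to a claimed, idle channel.  How the scatterlist entries
 * are filled in is platform-specific and not shown here.
 */
#if 0
static void example_use_sg(dmach_t channel, struct scatterlist *sg, int nr_sg)
{
        /* must only be done while the channel is not active */
        set_dma_sg(channel, sg, nr_sg);
}
#endif
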
/* Set DMA address
 *
 * Copy address to the structure, and set the invalid bit
 */
void set_dma_addr (dmach_t channel, unsigned long physaddr)
{
        dma_t *dma = dma_chan + channel;

        if (dma->active)
                printk(KERN_ERR "dma%d: altering DMA address while "
                       "DMA active\n", channel);

        dma->sg = &dma->buf;
        dma->sgcount = 1;
        dma->buf.__address = (char *)physaddr;  // FIXME - not pretty
        dma->using_sg = 0;
        dma->invalid = 1;
}

/* Set DMA byte count
 *
 * Copy count to the structure, and set the invalid bit
 */
void set_dma_count (dmach_t channel, unsigned long count)
{
        dma_t *dma = dma_chan + channel;

        if (dma->active)
                printk(KERN_ERR "dma%d: altering DMA count while "
                       "DMA active\n", channel);

        dma->sg = &dma->buf;
        dma->sgcount = 1;
        dma->buf.length = count;
        dma->using_sg = 0;
        dma->invalid = 1;
}

/* Set DMA direction mode
 */
void set_dma_mode (dmach_t channel, dmamode_t mode)
{
        dma_t *dma = dma_chan + channel;

        if (dma->active)
                printk(KERN_ERR "dma%d: altering DMA mode while "
                       "DMA active\n", channel);

        dma->dma_mode = mode;
        dma->invalid = 1;
}

/* Enable DMA channel
 */
void enable_dma (dmach_t channel)
{
        dma_t *dma = dma_chan + channel;

        if (!dma->lock)
                goto free_dma;

        if (dma->active == 0) {
                dma->active = 1;
                dma->d_ops->enable(channel, dma);
        }
        return;

free_dma:
        printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
        BUG();
}

/* Disable DMA channel
 */
void disable_dma (dmach_t channel)
{
        dma_t *dma = dma_chan + channel;

        if (!dma->lock)
                goto free_dma;

        if (dma->active == 1) {
                dma->active = 0;
                dma->d_ops->disable(channel, dma);
        }
        return;

free_dma:
        printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
        BUG();
}

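/*
 * Illustrative sketch (not part of the original source): programming and
 * starting a single-buffer transfer on a channel that has already been
 * claimed with request_dma().  The address, length and direction are
 * hypothetical; DMA_MODE_READ is assumed to be provided by <asm/dma.h>.
 * Locking against concurrent users of the channel is omitted for brevity.
 */
#if 0
static void example_start_transfer(dmach_t channel,
                                   unsigned long phys, unsigned long len)
{
        set_dma_mode(channel, DMA_MODE_READ);   /* device-to-memory */
        set_dma_addr(channel, phys);            /* physical buffer address */
        set_dma_count(channel, len);            /* transfer length in bytes */
        enable_dma(channel);                    /* start the transfer */
}
#endif
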
/*
 * Is the specified DMA channel active?
 */
int dma_channel_active(dmach_t channel)
{
        return dma_chan[channel].active;
}

void set_dma_page(dmach_t channel, char pagenr)
{
        printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
}

void set_dma_speed(dmach_t channel, int cycle_ns)
{
        dma_t *dma = dma_chan + channel;
        int ret = 0;

        if (dma->d_ops->setspeed)
                ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
        dma->speed = ret;
}

int get_dma_residue(dmach_t channel)
{
        dma_t *dma = dma_chan + channel;
        int ret = 0;

        if (dma->d_ops->residue)
                ret = dma->d_ops->residue(channel, dma);

        return ret;
}

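/*
 * Illustrative sketch (not part of the original source): stopping a channel
 * and checking whether the programmed transfer ran to completion, based on
 * the residue reported by the controller's d_ops.
 */
#if 0
static int example_transfer_complete(dmach_t channel)
{
        disable_dma(channel);
        return get_dma_residue(channel) == 0;   /* zero residue => done */
}
#endif
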
void __init init_dma(void)
{
        arch_dma_init(dma_chan);
}

EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_addr);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(set_dma_mode);
EXPORT_SYMBOL(set_dma_page);
EXPORT_SYMBOL(get_dma_residue);
EXPORT_SYMBOL(set_dma_sg);
EXPORT_SYMBOL(set_dma_speed);

EXPORT_SYMBOL(dma_spin_lock);