/*
 *  Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");

/*
 * scatter-gather buffer allocators, implemented in sgbuf.c
 */

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 * the reserved-buffer list and its lock
 */
55
56 static DEFINE_MUTEX(list_mutex);
57 static LIST_HEAD(mem_list_head);
58
59 /* buffer preservation list */
60 struct snd_mem_list {
61 struct snd_dma_buffer buffer;
62 unsigned int id;
63 struct list_head list;
64 };
65
66 /* id for pre-allocated buffers */
67 #define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
68
69 #ifdef CONFIG_SND_DEBUG
70 #define __ASTRING__(x) #x
71 #define snd_assert(expr, args...) do {\
72 if (!(expr)) {\
73 printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
74 args;\
75 }\
76 } while (0)
77 #else
78 #define snd_assert(expr, args...) /**/
79 #endif

/*
 * Hacks
 */

#if defined(__i386__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * Since dma_alloc_coherent() always falls back to GFP_DMA when the
 * requested PCI memory region is below 32 bits, it happens quite often
 * that even an order-2 page allocation fails.
 *
 * So, in the following, we allocate at first without dma_mask, so that
 * the allocation is done without GFP_DMA.  If the resulting area doesn't
 * fit in the requested region, we then reallocate with the original
 * dma_mask restored.
 *
 * Really, we want to move this type of thing into dma_alloc_coherent()
 * so that the dma_mask doesn't have to be messed with.
 */

static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle,
					 gfp_t flags)
{
	void *ret;
	u64 dma_mask, coherent_dma_mask;

	if (dev == NULL || !dev->dma_mask)
		return dma_alloc_coherent(dev, size, dma_handle, flags);
	dma_mask = *dev->dma_mask;
	coherent_dma_mask = dev->coherent_dma_mask;
	*dev->dma_mask = 0xffffffff;		/* do without masking */
	dev->coherent_dma_mask = 0xffffffff;	/* do without masking */
	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = dma_mask;			/* restore */
	dev->coherent_dma_mask = coherent_dma_mask;	/* restore */
	if (ret) {
		/* obtained address is out of range? */
		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
			/* reallocate with the proper mask */
			dma_free_coherent(dev, size, ret, *dma_handle);
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	} else {
		/* we'd like to succeed now with the proper mask... */
		if (dma_mask != 0xffffffffUL) {
			/* allocation with GFP_ATOMIC to avoid the long stall */
			flags &= ~GFP_KERNEL;
			flags |= GFP_ATOMIC;
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	}
	return ret;
}
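
/*
 * Worked example of the range check above (illustrative numbers only):
 * with dma_mask = 0x00ffffff (a 24-bit ISA-style mask), a returned handle
 * of 0x02000000 and size 0x1000 gives
 *
 *	(0x02000000 + 0x1000 - 1) & ~0x00ffffff = 0x02000000 != 0,
 *
 * so the buffer lies outside the addressable range and is reallocated
 * with the real mask in place.
 */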

/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
	snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
	snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages with the given size.
 *
 * Returns the pointer to the buffer, or NULL if not enough memory
 * is available.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
	return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	free_pages((unsigned long) ptr, pg);
}
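
/*
 * Usage sketch (hypothetical caller, not part of this file): the two
 * helpers above are meant to be used as a pair.  A driver that needs a
 * physically contiguous scratch buffer might do:
 *
 *	char *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	snd_free_pages(buf, 8192);
 *
 * The same size must be passed to snd_free_pages(), since the allocator
 * derives the page order from it via get_order().
 */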

/*
 *
 *  Bus-specific memory allocators
 *
 */

/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	void *res;
	gfp_t gfp_flags;

	snd_assert(size > 0, return NULL);
	snd_assert(dma != NULL, return NULL);
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
	if (res != NULL)
		inc_snd_pages(pg);

	return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
				   dma_addr_t *dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(dma_addr != NULL, return NULL);
	pg = get_order(size);
	res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
	if (res != NULL)
		inc_snd_pages(pg);
	return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
				void *ptr, dma_addr_t dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if a buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* for continuous pages, the device pointer actually carries
		 * the GFP flags; see snd_dma_continuous_data() in
		 * <sound/memalloc.h>
		 */
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
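
/*
 * Usage sketch (hypothetical caller, not part of this file): a PCI
 * driver allocating a 64 kB coherent DMA buffer and releasing it again
 * with snd_dma_free_pages() (defined below):
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *				64 * 1024, &dmab) < 0)
 *		return -ENOMEM;
 *	... use dmab.area (CPU address) and dmab.addr (DMA address) ...
 *	snd_dma_free_pages(&dmab);
 */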

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function halves the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		size >>= 1;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
	}
	if (! dmab->area)
		return -ENOMEM;
	return 0;
}
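
/*
 * Worked example of the fallback above: a request for 1024 kB that keeps
 * failing with -ENOMEM is retried with 512 kB, 256 kB, ..., and the loop
 * gives up once the halved size would drop to PAGE_SIZE or below.  The
 * caller therefore has to check dmab->bytes for the size it actually got.
 */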


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
#endif
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks through the reserved-buffer list and reuses the buffer if a
 * matching one is found.  When found, the buffer is removed from the
 * list and its contents are copied into @dmab.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	snd_assert(dmab, return 0);

	mutex_lock(&list_mutex);
	list_for_each(p, &mem_list_head) {
		mem = list_entry(p, struct snd_mem_list, list);
		if (mem->id == id &&
		    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
		     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
			struct device *dev = dmab->dev.dev;
			list_del(p);
			*dmab = mem->buffer;
			if (dmab->dev.dev == NULL)
				dmab->dev.dev = dev;
			kfree(mem);
			mutex_unlock(&list_mutex);
			return dmab->bytes;
		}
	}
	mutex_unlock(&list_mutex);
	return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list so that it can be
 * reused later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
	struct snd_mem_list *mem;

	snd_assert(dmab, return -EINVAL);
	mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	if (! mem)
		return -ENOMEM;
	mutex_lock(&list_mutex);
	mem->buffer = *dmab;
	mem->id = id;
	list_add_tail(&mem->list, &mem_list_head);
	mutex_unlock(&list_mutex);
	return 0;
}
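
/*
 * Usage sketch of the reservation pair above (hypothetical driver code,
 * not part of this file): on probe, try to reuse a previously reserved
 * buffer before allocating a fresh one, and re-reserve it on remove so
 * it survives driver reloads:
 *
 *	struct snd_dma_buffer dmab;
 *
 *	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
 *	dmab.dev.dev = snd_dma_pci_data(pci);
 *	if (!snd_dma_get_reserved_buf(&dmab, snd_dma_pci_buf_id(pci)))
 *		snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
 *				    snd_dma_pci_data(pci), size, &dmab);
 *	...
 *	snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
 *
 * Note that dev.type and dev.dev must be filled in before the lookup,
 * since the matching compares the whole dmab->dev record.
 */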

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
	struct list_head *p;
	struct snd_mem_list *mem;

	mutex_lock(&list_mutex);
	while (! list_empty(&mem_list_head)) {
		p = mem_list_head.next;
		mem = list_entry(p, struct snd_mem_list, list);
		list_del(p);
		snd_dma_free_pages(&mem->buffer);
		kfree(mem);
	}
	mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = 0;
	/* normalize the page count to 4 kB units */
	long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
	struct list_head *p;
	struct snd_mem_list *mem;
	int devno;
	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

	mutex_lock(&list_mutex);
	len += snprintf(page + len, count - len,
			"pages : %li bytes (%li pages per %likB)\n",
			pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
	devno = 0;
	list_for_each(p, &mem_list_head) {
		mem = list_entry(p, struct snd_mem_list, list);
		devno++;
		len += snprintf(page + len, count - len,
				"buffer %d : ID %08x : type %s\n",
				devno, mem->id, types[mem->buffer.dev.type]);
		len += snprintf(page + len, count - len,
				"  addr = 0x%lx, size = %d bytes\n",
				(unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
	}
	mutex_unlock(&list_mutex);
	return len;
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static int snd_mem_proc_write(struct file *file, const char __user *buffer,
			      unsigned long count, void *data)
{
	char buf[128];
	char *token, *p;

	if (count > ARRAY_SIZE(buf) - 1)
		count = ARRAY_SIZE(buf) - 1;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';	/* terminate right after the copied data */

	p = buf;
	token = gettoken(&p);
	if (! token || *token == '#')
		return (int)count;
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return (int)count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					return (int)count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					pci_dev_put(pci);
					return (int)count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (! alloced) {
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return (int)count;
}
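
/*
 * Example of driving this proc interface from user space (hypothetical
 * device IDs): reserve two 512 kB buffers for a PCI device 0x8086:0x2668
 * with a full 32-bit DMA mask, or drop all reservations again:
 *
 *	echo "add 0x8086 0x2668 0xffffffff 512k 2" > /proc/driver/snd-page-alloc
 *	echo "erase" > /proc/driver/snd-page-alloc
 *
 * The size token is parsed with memparse(), so suffixes like "k" and "M"
 * are accepted; it must lie between 64 kB and 16 MB, and at most four
 * buffers may be requested per command.
 */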
#endif /* CONFIG_PCI */
#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
	if (snd_mem_proc) {
		snd_mem_proc->read_proc = snd_mem_proc_read;
#ifdef CONFIG_PCI
		snd_mem_proc->write_proc = snd_mem_proc_write;
#endif
	}
#endif
	return 0;
}

static void __exit snd_mem_exit(void)
{
	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
	free_all_reserved_pages();
	if (snd_allocated_pages > 0)
		printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);