/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The page argument of this macro is an Emu page (4096 bytes),
 * not a host-aligned page as used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
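
/*
 * Worked example (illustrative values, not from the driver): a 4k-aligned
 * DMA page at addr 0x12345000 installed as PTB index 5 produces the entry
 *
 *	(0x12345000 << 1) | 5 == 0x2468a005
 *
 * The 4k alignment guarantees that, after the shift, the low 13 bits are
 * free to carry the page index.
 */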

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES		(MAXPAGES / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
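
/*
 * Example (illustrative, PAGE_SHIFT == 12): get_aligned_page(0x3420) == 3,
 * and aligned_page_offset(3) == 0x3000.
 */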

#if PAGE_SIZE == 4096
/* page size == EMUPAGESIZE */
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* the address is not incremented: every entry points
		 * at the same silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}
#endif /* PAGE_SIZE */
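
/*
 * Example (illustrative): with PAGE_SIZE == 16384, UNIT_PAGES == 4, so
 * set_ptb_entry(emu, 2, addr) fills PTB entries 8..11 with addr,
 * addr + 4096, addr + 8192 and addr + 12288.
 */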


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 0, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = MAX_ALIGN_PAGES - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
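
/*
 * Worked example (illustrative): with blocks mapped at pages 0..3 and
 * 8..15, searching for 4 pages returns page 4 (an exact fit between the
 * two blocks); searching for 5 pages skips the 4-page hole and returns
 * page 16, the start of the trailing free area.
 */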

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	/* insert this block in the proper position of the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resulting empty region (in pages)
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 0;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = MAX_ALIGN_PAGES;

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
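
/*
 * Example (illustrative): if the previous neighbour ends at page 4 and
 * the next neighbour starts at page 12, unmapping a 3-page block mapped
 * at page 6 returns 12 - 4 = 8, the whole span now free between the
 * neighbours.
 */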

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to a page boundary
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
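
/*
 * Example (illustrative): with existing blocks occupying aligned pages
 * 0..1 and 5..7, search_empty(emu, 3 * PAGE_SIZE) places the new block at
 * page 2 (offset 0x2000 with 4k pages), the first gap large enough to
 * hold it.
 */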


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_del(&blk->mapped_order_link);
		list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages: try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
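
/*
 * Usage sketch (hypothetical caller, for illustration only): a voice
 * about to play a block would ensure it is present in the PTB first:
 *
 *	if (snd_emu10k1_memblk_map(emu, blk) < 0)
 *		return -ENOMEM;	/# no room, even after evicting old blocks #/
 *
 * Repeated calls on an already-mapped block are cheap -- they only move
 * the block to the tail of the LRU-style order list.
 */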

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses, but pointers are not stored, so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			printk(KERN_ERR "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
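
/*
 * Usage note (for illustration; in the emu10k1 driver this is typically
 * reached from the PCM hw_params callback): the returned block maps the
 * scatter-gather PCM buffer into the card's address space, and is later
 * released with snd_emu10k1_free_pages() from hw_free.
 */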


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}


/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
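
/*
 * Usage sketch (hypothetical, for illustration; sample_bytes and buf are
 * placeholder names): a wavetable loader would typically pair these calls:
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes)) {
 *		snd_emu10k1_synth_free(emu, blk);
 *		return -EFAULT;
 *	}
 */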

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* compute the range of pages owned exclusively by blk; a boundary page
 * shared with a neighbouring block is excluded, since that neighbour
 * allocated (or still needs) it
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
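
/*
 * Example (illustrative): if blk spans pages 3..6, the previous block
 * ends on page 3, and the next block starts on page 6, the exclusive
 * range returned is 4..5.
 */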

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	int page;

	for (page = first_page; page <= last_page; page++) {
		free_page((unsigned long)emu->page_ptr_table[page]);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		/* first try to allocate from <4GB zone */
		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
					    __GFP_NOWARN);
		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) {
			if (p)
				__free_page(p);
			/* try to allocate from <16MB zone */
			p = alloc_page(GFP_ATOMIC | GFP_DMA |
				       __GFP_NORETRY | /* no OOM-killer */
				       __GFP_NOWARN);
		}
		if (!p) {
			__synth_free_pages(emu, first_page, page - 1);
			return -ENOMEM;
		}
		emu->page_addr_table[page] = page_to_phys(p);
		emu->page_ptr_table[page] = page_address(p);
	}
	return 0;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
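
/*
 * Example (illustrative, 4k pages): offset_ptr(emu, 7, 0x2345) returns
 * emu->page_ptr_table[7] + 0x345 -- only the in-page part of the offset
 * is used; the caller selects the page.
 */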

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	/* clear page by page: each chunk is the smaller of the bytes left
	 * on the current page and the bytes left in the request */
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;	/* bytes to the end of this page */
		temp1 = end_offset - offset;	/* bytes remaining in the request */
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	/* copy page by page, chunked exactly like snd_emu10k1_synth_bzero() */
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;	/* bytes to the end of this page */
		temp1 = end_offset - offset;	/* bytes remaining in the request */
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);