1 /*
2 * linux/arch/arm/mm/dma-mapping.c
3 *
4 * Copyright (C) 2000-2004 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * DMA uncached mapping support.
11 */
12 #include <linux/module.h>
13 #include <linux/mm.h>
14 #include <linux/gfp.h>
15 #include <linux/errno.h>
16 #include <linux/list.h>
17 #include <linux/init.h>
18 #include <linux/device.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma-contiguous.h>
21 #include <linux/highmem.h>
22 #include <linux/memblock.h>
23 #include <linux/slab.h>
24 #include <linux/iommu.h>
25 #include <linux/io.h>
26 #include <linux/vmalloc.h>
27 #include <linux/sizes.h>
28
29 #include <asm/memory.h>
30 #include <asm/highmem.h>
31 #include <asm/cacheflush.h>
32 #include <asm/tlbflush.h>
33 #include <asm/mach/arch.h>
34 #include <asm/dma-iommu.h>
35 #include <asm/mach/map.h>
36 #include <asm/system_info.h>
37 #include <asm/dma-contiguous.h>
38
39 #include "mm.h"
40
41 /*
42 * The DMA API is built upon the notion of "buffer ownership". A buffer
43 * is either exclusively owned by the CPU (and therefore may be accessed
44 * by it) or exclusively owned by the DMA device. These helper functions
45 * represent the transitions between these two ownership states.
46 *
47 * Note, however, that on later ARMs, this notion does not work due to
48 * speculative prefetches. We model our approach on the assumption that
49 * the CPU does do speculative prefetches, which means we clean caches
50 * before transfers and delay cache invalidation until transfer completion.
51 *
52 */
53 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
54 size_t, enum dma_data_direction);
55 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
56 size_t, enum dma_data_direction);
57
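/*
 * Editor's illustration (not part of the original file): a driver-side
 * streaming DMA sequence exercising the ownership transitions described
 * above might look like the following sketch, where 'buf' and 'len' are
 * placeholders:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * After dma_map_single() the device owns the buffer; after
 * dma_unmap_single() ownership returns to the CPU and reads see whatever
 * the device wrote.
 */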
58 /**
59 * arm_dma_map_page - map a portion of a page for streaming DMA
60 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
61 * @page: page that buffer resides in
62 * @offset: offset into page for start of buffer
63 * @size: size of buffer to map
64 * @dir: DMA transfer direction
65 *
66 * Ensure that any data held in the cache is appropriately discarded
67 * or written back.
68 *
69 * The device owns this memory once this call has completed. The CPU
70 * can regain ownership by calling dma_unmap_page().
71 */
72 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
73 unsigned long offset, size_t size, enum dma_data_direction dir,
74 struct dma_attrs *attrs)
75 {
76 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
77 __dma_page_cpu_to_dev(page, offset, size, dir);
78 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
79 }
80
81 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
82 unsigned long offset, size_t size, enum dma_data_direction dir,
83 struct dma_attrs *attrs)
84 {
85 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
86 }
87
88 /**
89 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
90 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
91 * @handle: DMA address of buffer
92 * @size: size of buffer (same as passed to dma_map_page)
93 * @dir: DMA transfer direction (same as passed to dma_map_page)
94 *
95 * Unmap a page streaming mode DMA translation. The handle and size
96 * must match what was provided in the previous dma_map_page() call.
97 * All other usages are undefined.
98 *
99 * After this call, reads by the CPU to the buffer are guaranteed to see
100 * whatever the device wrote there.
101 */
102 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
103 size_t size, enum dma_data_direction dir,
104 struct dma_attrs *attrs)
105 {
106 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
107 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
108 handle & ~PAGE_MASK, size, dir);
109 }
110
111 static void arm_dma_sync_single_for_cpu(struct device *dev,
112 dma_addr_t handle, size_t size, enum dma_data_direction dir)
113 {
114 unsigned int offset = handle & (PAGE_SIZE - 1);
115 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
116 __dma_page_dev_to_cpu(page, offset, size, dir);
117 }
118
119 static void arm_dma_sync_single_for_device(struct device *dev,
120 dma_addr_t handle, size_t size, enum dma_data_direction dir)
121 {
122 unsigned int offset = handle & (PAGE_SIZE - 1);
123 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
124 __dma_page_cpu_to_dev(page, offset, size, dir);
125 }
126
127 struct dma_map_ops arm_dma_ops = {
128 .alloc = arm_dma_alloc,
129 .free = arm_dma_free,
130 .mmap = arm_dma_mmap,
131 .get_sgtable = arm_dma_get_sgtable,
132 .map_page = arm_dma_map_page,
133 .unmap_page = arm_dma_unmap_page,
134 .map_sg = arm_dma_map_sg,
135 .unmap_sg = arm_dma_unmap_sg,
136 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
137 .sync_single_for_device = arm_dma_sync_single_for_device,
138 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
139 .sync_sg_for_device = arm_dma_sync_sg_for_device,
140 .set_dma_mask = arm_dma_set_mask,
141 };
142 EXPORT_SYMBOL(arm_dma_ops);
143
144 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
145 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
146 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
147 dma_addr_t handle, struct dma_attrs *attrs);
148
149 struct dma_map_ops arm_coherent_dma_ops = {
150 .alloc = arm_coherent_dma_alloc,
151 .free = arm_coherent_dma_free,
152 .mmap = arm_dma_mmap,
153 .get_sgtable = arm_dma_get_sgtable,
154 .map_page = arm_coherent_dma_map_page,
155 .map_sg = arm_dma_map_sg,
156 .set_dma_mask = arm_dma_set_mask,
157 };
158 EXPORT_SYMBOL(arm_coherent_dma_ops);
159
160 static u64 get_coherent_dma_mask(struct device *dev)
161 {
162 u64 mask = (u64)arm_dma_limit;
163
164 if (dev) {
165 mask = dev->coherent_dma_mask;
166
167 /*
168 * Sanity check the DMA mask - it must be non-zero, and
169 * must be able to be satisfied by a DMA allocation.
170 */
171 if (mask == 0) {
172 dev_warn(dev, "coherent DMA mask is unset\n");
173 return 0;
174 }
175
176 if ((~mask) & (u64)arm_dma_limit) {
177 dev_warn(dev, "coherent DMA mask %#llx is smaller "
178 "than system GFP_DMA mask %#llx\n",
179 mask, (u64)arm_dma_limit);
180 return 0;
181 }
182 }
183
184 return mask;
185 }
186
187 static void __dma_clear_buffer(struct page *page, size_t size)
188 {
189 void *ptr;
190 /*
191 * Ensure that the allocated pages are zeroed, and that any data
192 * lurking in the kernel direct-mapped region is invalidated.
193 */
194 ptr = page_address(page);
195 if (ptr) {
196 memset(ptr, 0, size);
197 dmac_flush_range(ptr, ptr + size);
198 outer_flush_range(__pa(ptr), __pa(ptr) + size);
199 }
200 }
201
202 /*
203 * Allocate a DMA buffer for 'dev' of size 'size' using the
204 * specified gfp mask. Note that 'size' must be page aligned.
205 */
206 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
207 {
208 unsigned long order = get_order(size);
209 struct page *page, *p, *e;
210
211 page = alloc_pages(gfp, order);
212 if (!page)
213 return NULL;
214
215 /*
216 * Now split the huge page and free the excess pages
217 */
218 split_page(page, order);
219 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
220 __free_page(p);
221
222 __dma_clear_buffer(page, size);
223
224 return page;
225 }
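/*
 * Editor's note: as a worked example of the split_page() trimming above,
 * a 12 KiB (3-page) request has get_order() == 2, so a 4-page block is
 * allocated, split into individual pages, and the fourth page is handed
 * straight back to the page allocator.
 */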
226
227 /*
228 * Free a DMA buffer. 'size' must be page aligned.
229 */
230 static void __dma_free_buffer(struct page *page, size_t size)
231 {
232 struct page *e = page + (size >> PAGE_SHIFT);
233
234 while (page < e) {
235 __free_page(page);
236 page++;
237 }
238 }
239
240 #ifdef CONFIG_MMU
241 #ifdef CONFIG_HUGETLB_PAGE
242 #error ARM Coherent DMA allocator does not (yet) support huge TLB
243 #endif
244
245 static void *__alloc_from_contiguous(struct device *dev, size_t size,
246 pgprot_t prot, struct page **ret_page);
247
248 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
249 pgprot_t prot, struct page **ret_page,
250 const void *caller);
251
252 static void *
253 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
254 const void *caller)
255 {
256 struct vm_struct *area;
257 unsigned long addr;
258
259 /*
260 * DMA allocations can be mapped to user space, so let's
261 * set the VM_USERMAP flag too.
262 */
263 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
264 caller);
265 if (!area)
266 return NULL;
267 addr = (unsigned long)area->addr;
268 area->phys_addr = __pfn_to_phys(page_to_pfn(page));
269
270 if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
271 vunmap((void *)addr);
272 return NULL;
273 }
274 return (void *)addr;
275 }
276
277 static void __dma_free_remap(void *cpu_addr, size_t size)
278 {
279 unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
280 struct vm_struct *area = find_vm_area(cpu_addr);
281 if (!area || (area->flags & flags) != flags) {
282 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
283 return;
284 }
285 unmap_kernel_range((unsigned long)cpu_addr, size);
286 vunmap(cpu_addr);
287 }
288
289 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
290
291 struct dma_pool {
292 size_t size;
293 spinlock_t lock;
294 unsigned long *bitmap;
295 unsigned long nr_pages;
296 void *vaddr;
297 struct page **pages;
298 };
299
300 static struct dma_pool atomic_pool = {
301 .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
302 };
303
304 static int __init early_coherent_pool(char *p)
305 {
306 atomic_pool.size = memparse(p, &p);
307 return 0;
308 }
309 early_param("coherent_pool", early_coherent_pool);
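/*
 * Editor's note: the pool size can be overridden from the kernel command
 * line, e.g. "coherent_pool=1M"; memparse() accepts the usual K/M/G
 * suffixes.
 */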
310
311 void __init init_dma_coherent_pool_size(unsigned long size)
312 {
313 /*
314 * Catch any attempt to set the pool size too late.
315 */
316 BUG_ON(atomic_pool.vaddr);
317
318 /*
319 * Set architecture specific coherent pool size only if
320 * it has not been changed by kernel command line parameter.
321 */
322 if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
323 atomic_pool.size = size;
324 }
325
326 /*
327 * Initialise the coherent pool for atomic allocations.
328 */
329 static int __init atomic_pool_init(void)
330 {
331 struct dma_pool *pool = &atomic_pool;
332 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
333 unsigned long nr_pages = pool->size >> PAGE_SHIFT;
334 unsigned long *bitmap;
335 struct page *page;
336 struct page **pages;
337 void *ptr;
338 int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
339
340 bitmap = kzalloc(bitmap_size, GFP_KERNEL);
341 if (!bitmap)
342 goto no_bitmap;
343
344 pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
345 if (!pages)
346 goto no_pages;
347
348 if (IS_ENABLED(CONFIG_CMA))
349 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
350 else
351 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
352 &page, NULL);
353 if (ptr) {
354 int i;
355
356 for (i = 0; i < nr_pages; i++)
357 pages[i] = page + i;
358
359 spin_lock_init(&pool->lock);
360 pool->vaddr = ptr;
361 pool->pages = pages;
362 pool->bitmap = bitmap;
363 pool->nr_pages = nr_pages;
364 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
365 (unsigned)pool->size / 1024);
366 return 0;
367 }
368
369 kfree(pages);
370 no_pages:
371 kfree(bitmap);
372 no_bitmap:
373 pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
374 (unsigned)pool->size / 1024);
375 return -ENOMEM;
376 }
377 /*
378 * CMA is activated by core_initcall, so we must be called after it.
379 */
380 postcore_initcall(atomic_pool_init);
381
382 struct dma_contig_early_reserve {
383 phys_addr_t base;
384 unsigned long size;
385 };
386
387 static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
388
389 static int dma_mmu_remap_num __initdata;
390
391 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
392 {
393 dma_mmu_remap[dma_mmu_remap_num].base = base;
394 dma_mmu_remap[dma_mmu_remap_num].size = size;
395 dma_mmu_remap_num++;
396 }
397
398 void __init dma_contiguous_remap(void)
399 {
400 int i;
401 for (i = 0; i < dma_mmu_remap_num; i++) {
402 phys_addr_t start = dma_mmu_remap[i].base;
403 phys_addr_t end = start + dma_mmu_remap[i].size;
404 struct map_desc map;
405 unsigned long addr;
406
407 if (end > arm_lowmem_limit)
408 end = arm_lowmem_limit;
409 if (start >= end)
410 continue;
411
412 map.pfn = __phys_to_pfn(start);
413 map.virtual = __phys_to_virt(start);
414 map.length = end - start;
415 map.type = MT_MEMORY_DMA_READY;
416
417 /*
418 * Clear previous low-memory mapping
419 */
420 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
421 addr += PMD_SIZE)
422 pmd_clear(pmd_off_k(addr));
423
424 iotable_init(&map, 1);
425 }
426 }
427
428 static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
429 void *data)
430 {
431 struct page *page = virt_to_page(addr);
432 pgprot_t prot = *(pgprot_t *)data;
433
434 set_pte_ext(pte, mk_pte(page, prot), 0);
435 return 0;
436 }
437
438 static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
439 {
440 unsigned long start = (unsigned long) page_address(page);
441 unsigned long end = start + size;
442
443 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
444 dsb();
445 flush_tlb_kernel_range(start, end);
446 }
447
448 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
449 pgprot_t prot, struct page **ret_page,
450 const void *caller)
451 {
452 struct page *page;
453 void *ptr;
454 page = __dma_alloc_buffer(dev, size, gfp);
455 if (!page)
456 return NULL;
457
458 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
459 if (!ptr) {
460 __dma_free_buffer(page, size);
461 return NULL;
462 }
463
464 *ret_page = page;
465 return ptr;
466 }
467
468 static void *__alloc_from_pool(size_t size, struct page **ret_page)
469 {
470 struct dma_pool *pool = &atomic_pool;
471 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
472 unsigned int pageno;
473 unsigned long flags;
474 void *ptr = NULL;
475 unsigned long align_mask;
476
477 if (!pool->vaddr) {
478 WARN(1, "coherent pool not initialised!\n");
479 return NULL;
480 }
481
482 /*
483 * Align the region allocation: allocations from the pool are rather
484 * small, so align them to their order in pages (minimum one page).
485 * This helps reduce fragmentation of the DMA space.
486 */
487 align_mask = (1 << get_order(size)) - 1;
488
489 spin_lock_irqsave(&pool->lock, flags);
490 pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
491 0, count, align_mask);
492 if (pageno < pool->nr_pages) {
493 bitmap_set(pool->bitmap, pageno, count);
494 ptr = pool->vaddr + PAGE_SIZE * pageno;
495 *ret_page = pool->pages[pageno];
496 } else {
497 pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
498 "Please increase it with coherent_pool= kernel parameter!\n",
499 (unsigned)pool->size / 1024);
500 }
501 spin_unlock_irqrestore(&pool->lock, flags);
502
503 return ptr;
504 }
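/*
 * Editor's note: worked example of the alignment above - an 8 KiB request
 * has get_order() == 1, so align_mask == 1 and bitmap_find_next_zero_area()
 * returns an area starting at an even page index within the pool.
 */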
505
506 static bool __in_atomic_pool(void *start, size_t size)
507 {
508 struct dma_pool *pool = &atomic_pool;
509 void *end = start + size;
510 void *pool_start = pool->vaddr;
511 void *pool_end = pool->vaddr + pool->size;
512
513 if (start < pool_start || start >= pool_end)
514 return false;
515
516 if (end <= pool_end)
517 return true;
518
519 WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
520 start, end - 1, pool_start, pool_end - 1);
521
522 return false;
523 }
524
525 static int __free_from_pool(void *start, size_t size)
526 {
527 struct dma_pool *pool = &atomic_pool;
528 unsigned long pageno, count;
529 unsigned long flags;
530
531 if (!__in_atomic_pool(start, size))
532 return 0;
533
534 pageno = (start - pool->vaddr) >> PAGE_SHIFT;
535 count = size >> PAGE_SHIFT;
536
537 spin_lock_irqsave(&pool->lock, flags);
538 bitmap_clear(pool->bitmap, pageno, count);
539 spin_unlock_irqrestore(&pool->lock, flags);
540
541 return 1;
542 }
543
544 static void *__alloc_from_contiguous(struct device *dev, size_t size,
545 pgprot_t prot, struct page **ret_page)
546 {
547 unsigned long order = get_order(size);
548 size_t count = size >> PAGE_SHIFT;
549 struct page *page;
550
551 page = dma_alloc_from_contiguous(dev, count, order);
552 if (!page)
553 return NULL;
554
555 __dma_clear_buffer(page, size);
556 __dma_remap(page, size, prot);
557
558 *ret_page = page;
559 return page_address(page);
560 }
561
562 static void __free_from_contiguous(struct device *dev, struct page *page,
563 size_t size)
564 {
565 __dma_remap(page, size, pgprot_kernel);
566 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
567 }
568
569 static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
570 {
571 prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
572 pgprot_writecombine(prot) :
573 pgprot_dmacoherent(prot);
574 return prot;
575 }
576
577 #define nommu() 0
578
579 #else /* !CONFIG_MMU */
580
581 #define nommu() 1
582
583 #define __get_dma_pgprot(attrs, prot) __pgprot(0)
584 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
585 #define __alloc_from_pool(size, ret_page) NULL
586 #define __alloc_from_contiguous(dev, size, prot, ret) NULL
587 #define __free_from_pool(cpu_addr, size) 0
588 #define __free_from_contiguous(dev, page, size) do { } while (0)
589 #define __dma_free_remap(cpu_addr, size) do { } while (0)
590
591 #endif /* CONFIG_MMU */
592
593 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
594 struct page **ret_page)
595 {
596 struct page *page;
597 page = __dma_alloc_buffer(dev, size, gfp);
598 if (!page)
599 return NULL;
600
601 *ret_page = page;
602 return page_address(page);
603 }
604
605
606
607 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
608 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
609 {
610 u64 mask = get_coherent_dma_mask(dev);
611 struct page *page = NULL;
612 void *addr;
613
614 #ifdef CONFIG_DMA_API_DEBUG
615 u64 limit = (mask + 1) & ~mask;
616 if (limit && size >= limit) {
617 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
618 size, mask);
619 return NULL;
620 }
621 #endif
622
623 if (!mask)
624 return NULL;
625
626 if (mask < 0xffffffffULL)
627 gfp |= GFP_DMA;
628
629 /*
630 * Following is a work-around (a.k.a. hack) to prevent pages
631 * with __GFP_COMP being passed to split_page() which cannot
632 * handle them. The real problem is that this flag probably
633 * should be 0 on ARM as it is not supported on this
634 * platform; see CONFIG_HUGETLBFS.
635 */
636 gfp &= ~(__GFP_COMP);
637
638 *handle = DMA_ERROR_CODE;
639 size = PAGE_ALIGN(size);
640
641 if (is_coherent || nommu())
642 addr = __alloc_simple_buffer(dev, size, gfp, &page);
643 else if (gfp & GFP_ATOMIC)
644 addr = __alloc_from_pool(size, &page);
645 else if (!IS_ENABLED(CONFIG_CMA))
646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
647 else
648 addr = __alloc_from_contiguous(dev, size, prot, &page);
649
650 if (addr)
651 *handle = pfn_to_dma(dev, page_to_pfn(page));
652
653 return addr;
654 }
655
656 /*
657 * Allocate DMA-coherent memory space and return both the kernel remapped
658 * virtual and bus address for that space.
659 */
660 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
661 gfp_t gfp, struct dma_attrs *attrs)
662 {
663 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
664 void *memory;
665
666 if (dma_alloc_from_coherent(dev, size, handle, &memory))
667 return memory;
668
669 return __dma_alloc(dev, size, handle, gfp, prot, false,
670 __builtin_return_address(0));
671 }
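/*
 * Editor's illustration (not part of the original file): a driver would
 * normally reach arm_dma_alloc() through the generic API; a minimal
 * sketch, with 'ring' and 'RING_BYTES' as placeholder names:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */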
672
673 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
674 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
675 {
676 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
677 void *memory;
678
679 if (dma_alloc_from_coherent(dev, size, handle, &memory))
680 return memory;
681
682 return __dma_alloc(dev, size, handle, gfp, prot, true,
683 __builtin_return_address(0));
684 }
685
686 /*
687 * Create userspace mapping for the DMA-coherent memory.
688 */
689 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
690 void *cpu_addr, dma_addr_t dma_addr, size_t size,
691 struct dma_attrs *attrs)
692 {
693 int ret = -ENXIO;
694 #ifdef CONFIG_MMU
695 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
696 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
697 unsigned long pfn = dma_to_pfn(dev, dma_addr);
698 unsigned long off = vma->vm_pgoff;
699
700 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
701
702 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
703 return ret;
704
705 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
706 ret = remap_pfn_range(vma, vma->vm_start,
707 pfn + off,
708 vma->vm_end - vma->vm_start,
709 vma->vm_page_prot);
710 }
711 #endif /* CONFIG_MMU */
712
713 return ret;
714 }
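/*
 * Editor's illustration (not part of the original file): arm_dma_mmap() is
 * typically reached from a driver's mmap file operation via the generic
 * wrapper; a hedged sketch, where 'foo_priv' is a placeholder structure
 * holding a previously allocated coherent buffer:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */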
715
716 /*
717 * Free a buffer as defined by the above mapping.
718 */
719 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
720 dma_addr_t handle, struct dma_attrs *attrs,
721 bool is_coherent)
722 {
723 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
724
725 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
726 return;
727
728 size = PAGE_ALIGN(size);
729
730 if (is_coherent || nommu()) {
731 __dma_free_buffer(page, size);
732 } else if (__free_from_pool(cpu_addr, size)) {
733 return;
734 } else if (!IS_ENABLED(CONFIG_CMA)) {
735 __dma_free_remap(cpu_addr, size);
736 __dma_free_buffer(page, size);
737 } else {
738 /*
739 * Non-atomic allocations cannot be freed with IRQs disabled
740 */
741 WARN_ON(irqs_disabled());
742 __free_from_contiguous(dev, page, size);
743 }
744 }
745
746 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
747 dma_addr_t handle, struct dma_attrs *attrs)
748 {
749 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
750 }
751
752 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
753 dma_addr_t handle, struct dma_attrs *attrs)
754 {
755 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
756 }
757
758 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
759 void *cpu_addr, dma_addr_t handle, size_t size,
760 struct dma_attrs *attrs)
761 {
762 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
763 int ret;
764
765 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
766 if (unlikely(ret))
767 return ret;
768
769 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
770 return 0;
771 }
772
773 static void dma_cache_maint_page(struct page *page, unsigned long offset,
774 size_t size, enum dma_data_direction dir,
775 void (*op)(const void *, size_t, int))
776 {
777 /*
778 * A single sg entry may refer to multiple physically contiguous
779 * pages. But we still need to process highmem pages individually.
780 * If highmem is not configured then the bulk of this loop gets
781 * optimized out.
782 */
783 size_t left = size;
784 do {
785 size_t len = left;
786 void *vaddr;
787
788 if (PageHighMem(page)) {
789 if (len + offset > PAGE_SIZE) {
790 if (offset >= PAGE_SIZE) {
791 page += offset / PAGE_SIZE;
792 offset %= PAGE_SIZE;
793 }
794 len = PAGE_SIZE - offset;
795 }
796 vaddr = kmap_high_get(page);
797 if (vaddr) {
798 vaddr += offset;
799 op(vaddr, len, dir);
800 kunmap_high(page);
801 } else if (cache_is_vipt()) {
802 /* unmapped pages might still be cached */
803 vaddr = kmap_atomic(page);
804 op(vaddr + offset, len, dir);
805 kunmap_atomic(vaddr);
806 }
807 } else {
808 vaddr = page_address(page) + offset;
809 op(vaddr, len, dir);
810 }
811 offset = 0;
812 page++;
813 left -= len;
814 } while (left);
815 }
816
817 /*
818 * Make an area consistent for devices.
819 * Note: Drivers should NOT use this function directly, as it will break
820 * platforms with CONFIG_DMABOUNCE.
821 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
822 */
823 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
824 size_t size, enum dma_data_direction dir)
825 {
826 unsigned long paddr;
827
828 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
829
830 paddr = page_to_phys(page) + off;
831 if (dir == DMA_FROM_DEVICE) {
832 outer_inv_range(paddr, paddr + size);
833 } else {
834 outer_clean_range(paddr, paddr + size);
835 }
836 /* FIXME: non-speculating: flush on bidirectional mappings? */
837 }
838
839 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
840 size_t size, enum dma_data_direction dir)
841 {
842 unsigned long paddr = page_to_phys(page) + off;
843
844 /* FIXME: non-speculating: not required */
845 /* don't bother invalidating if DMA to device */
846 if (dir != DMA_TO_DEVICE)
847 outer_inv_range(paddr, paddr + size);
848
849 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
850
851 /*
852 * Mark the D-cache clean for this page to avoid extra flushing.
853 */
854 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
855 set_bit(PG_dcache_clean, &page->flags);
856 }
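/*
 * Editor's illustration (not part of the original file): the dma_sync_*
 * interfaces referred to above are what drivers use when the CPU needs to
 * touch a buffer that stays mapped for streaming DMA; a hedged sketch with
 * 'desc' and process_rx_data() as placeholders:
 *
 *	dma_sync_single_for_cpu(dev, desc->dma, desc->len, DMA_FROM_DEVICE);
 *	process_rx_data(desc->cpu_addr, desc->len);
 *	dma_sync_single_for_device(dev, desc->dma, desc->len, DMA_FROM_DEVICE);
 */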
857
858 /**
859 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
860 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
861 * @sg: list of buffers
862 * @nents: number of buffers to map
863 * @dir: DMA transfer direction
864 *
865 * Map a set of buffers described by scatterlist in streaming mode for DMA.
866 * This is the scatter-gather version of the dma_map_single interface.
867 * Here the scatter gather list elements are each tagged with the
868 * appropriate dma address and length. They are obtained via
869 * sg_dma_{address,length}.
870 *
871 * Device ownership issues as mentioned for dma_map_single are the same
872 * here.
873 */
874 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
875 enum dma_data_direction dir, struct dma_attrs *attrs)
876 {
877 struct dma_map_ops *ops = get_dma_ops(dev);
878 struct scatterlist *s;
879 int i, j;
880
881 for_each_sg(sg, s, nents, i) {
882 #ifdef CONFIG_NEED_SG_DMA_LENGTH
883 s->dma_length = s->length;
884 #endif
885 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
886 s->length, dir, attrs);
887 if (dma_mapping_error(dev, s->dma_address))
888 goto bad_mapping;
889 }
890 return nents;
891
892 bad_mapping:
893 for_each_sg(sg, s, i, j)
894 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
895 return 0;
896 }
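/*
 * Editor's illustration (not part of the original file): typical driver
 * usage of the scatter-gather mapping above, with 'nbufs', 'NBUFS' and
 * 'pages[]' as placeholders:
 *
 *	struct scatterlist sgl[NBUFS];
 *	int i, mapped;
 *
 *	sg_init_table(sgl, nbufs);
 *	for (i = 0; i < nbufs; i++)
 *		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
 *	mapped = dma_map_sg(dev, sgl, nbufs, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nbufs, DMA_TO_DEVICE);
 */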
897
898 /**
899 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
900 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
901 * @sg: list of buffers
902 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
903 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
904 *
905 * Unmap a set of streaming mode DMA translations. Again, CPU access
906 * rules concerning calls here are the same as for dma_unmap_single().
907 */
908 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
909 enum dma_data_direction dir, struct dma_attrs *attrs)
910 {
911 struct dma_map_ops *ops = get_dma_ops(dev);
912 struct scatterlist *s;
913
914 int i;
915
916 for_each_sg(sg, s, nents, i)
917 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
918 }
919
920 /**
921 * arm_dma_sync_sg_for_cpu
922 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
923 * @sg: list of buffers
924 * @nents: number of buffers to map (returned from dma_map_sg)
925 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
926 */
927 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
928 int nents, enum dma_data_direction dir)
929 {
930 struct dma_map_ops *ops = get_dma_ops(dev);
931 struct scatterlist *s;
932 int i;
933
934 for_each_sg(sg, s, nents, i)
935 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
936 dir);
937 }
938
939 /**
940 * arm_dma_sync_sg_for_device
941 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
942 * @sg: list of buffers
943 * @nents: number of buffers to map (returned from dma_map_sg)
944 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
945 */
946 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
947 int nents, enum dma_data_direction dir)
948 {
949 struct dma_map_ops *ops = get_dma_ops(dev);
950 struct scatterlist *s;
951 int i;
952
953 for_each_sg(sg, s, nents, i)
954 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
955 dir);
956 }
957
958 /*
959 * Return whether the given device DMA address mask can be supported
960 * properly. For example, if your device can only drive the low 24-bits
961 * during bus mastering, then you would pass 0x00ffffff as the mask
962 * to this function.
963 */
964 int dma_supported(struct device *dev, u64 mask)
965 {
966 if (mask < (u64)arm_dma_limit)
967 return 0;
968 return 1;
969 }
970 EXPORT_SYMBOL(dma_supported);
971
972 int arm_dma_set_mask(struct device *dev, u64 dma_mask)
973 {
974 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
975 return -EIO;
976
977 *dev->dma_mask = dma_mask;
978
979 return 0;
980 }
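/*
 * Editor's illustration (not part of the original file): a driver limited
 * to 32-bit DMA addressing would typically exercise the mask handling
 * above from its probe routine:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
 *	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */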
981
982 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
983
984 static int __init dma_debug_do_init(void)
985 {
986 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
987 return 0;
988 }
989 fs_initcall(dma_debug_do_init);
990
991 #ifdef CONFIG_ARM_DMA_USE_IOMMU
992
993 /* IOMMU */
994
995 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
996 size_t size)
997 {
998 unsigned int order = get_order(size);
999 unsigned int align = 0;
1000 unsigned int count, start;
1001 unsigned long flags;
1002
1003 count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
1004 (1 << mapping->order) - 1) >> mapping->order;
1005
1006 if (order > mapping->order)
1007 align = (1 << (order - mapping->order)) - 1;
1008
1009 spin_lock_irqsave(&mapping->lock, flags);
1010 start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
1011 count, align);
1012 if (start > mapping->bits) {
1013 spin_unlock_irqrestore(&mapping->lock, flags);
1014 return DMA_ERROR_CODE;
1015 }
1016
1017 bitmap_set(mapping->bitmap, start, count);
1018 spin_unlock_irqrestore(&mapping->lock, flags);
1019
1020 return mapping->base + (start << (mapping->order + PAGE_SHIFT));
1021 }
1022
1023 static inline void __free_iova(struct dma_iommu_mapping *mapping,
1024 dma_addr_t addr, size_t size)
1025 {
1026 unsigned int start = (addr - mapping->base) >>
1027 (mapping->order + PAGE_SHIFT);
1028 unsigned int count = ((size >> PAGE_SHIFT) +
1029 (1 << mapping->order) - 1) >> mapping->order;
1030 unsigned long flags;
1031
1032 spin_lock_irqsave(&mapping->lock, flags);
1033 bitmap_clear(mapping->bitmap, start, count);
1034 spin_unlock_irqrestore(&mapping->lock, flags);
1035 }
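/*
 * Editor's note: worked example of the order arithmetic above - with
 * mapping->order == 1 the bitmap tracks 2-page (8 KiB) units, so a 5-page
 * request rounds up to count == 3 units; and since get_order() of the
 * request (3) exceeds mapping->order, the search is additionally aligned
 * to (1 << (3 - 1)) == 4 units.
 */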
1036
1037 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1038 gfp_t gfp, struct dma_attrs *attrs)
1039 {
1040 struct page **pages;
1041 int count = size >> PAGE_SHIFT;
1042 int array_size = count * sizeof(struct page *);
1043 int i = 0;
1044
1045 if (array_size <= PAGE_SIZE)
1046 pages = kzalloc(array_size, gfp);
1047 else
1048 pages = vzalloc(array_size);
1049 if (!pages)
1050 return NULL;
1051
1052 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
1053 {
1054 unsigned long order = get_order(size);
1055 struct page *page;
1056
1057 page = dma_alloc_from_contiguous(dev, count, order);
1058 if (!page)
1059 goto error;
1060
1061 __dma_clear_buffer(page, size);
1062
1063 for (i = 0; i < count; i++)
1064 pages[i] = page + i;
1065
1066 return pages;
1067 }
1068
1069 while (count) {
1070 int j, order = __fls(count);
1071
1072 pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
1073 while (!pages[i] && order)
1074 pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
1075 if (!pages[i])
1076 goto error;
1077
1078 if (order) {
1079 split_page(pages[i], order);
1080 j = 1 << order;
1081 while (--j)
1082 pages[i + j] = pages[i] + j;
1083 }
1084
1085 __dma_clear_buffer(pages[i], PAGE_SIZE << order);
1086 i += 1 << order;
1087 count -= 1 << order;
1088 }
1089
1090 return pages;
1091 error:
1092 while (i--)
1093 if (pages[i])
1094 __free_pages(pages[i], 0);
1095 if (array_size <= PAGE_SIZE)
1096 kfree(pages);
1097 else
1098 vfree(pages);
1099 return NULL;
1100 }
1101
1102 static int __iommu_free_buffer(struct device *dev, struct page **pages,
1103 size_t size, struct dma_attrs *attrs)
1104 {
1105 int count = size >> PAGE_SHIFT;
1106 int array_size = count * sizeof(struct page *);
1107 int i;
1108
1109 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1110 dma_release_from_contiguous(dev, pages[0], count);
1111 } else {
1112 for (i = 0; i < count; i++)
1113 if (pages[i])
1114 __free_pages(pages[i], 0);
1115 }
1116
1117 if (array_size <= PAGE_SIZE)
1118 kfree(pages);
1119 else
1120 vfree(pages);
1121 return 0;
1122 }
1123
1124 /*
1125 * Create a CPU mapping for the specified pages
1126 */
1127 static void *
1128 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1129 const void *caller)
1130 {
1131 unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1132 struct vm_struct *area;
1133 unsigned long p;
1134
1135 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
1136 caller);
1137 if (!area)
1138 return NULL;
1139
1140 area->pages = pages;
1141 area->nr_pages = nr_pages;
1142 p = (unsigned long)area->addr;
1143
1144 for (i = 0; i < nr_pages; i++) {
1145 phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
1146 if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
1147 goto err;
1148 p += PAGE_SIZE;
1149 }
1150 return area->addr;
1151 err:
1152 unmap_kernel_range((unsigned long)area->addr, size);
1153 vunmap(area->addr);
1154 return NULL;
1155 }
1156
1157 /*
1158 * Create a mapping in device IO address space for the specified pages
1159 */
1160 static dma_addr_t
1161 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1162 {
1163 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1164 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1165 dma_addr_t dma_addr, iova;
1166 int i, ret = DMA_ERROR_CODE;
1167
1168 dma_addr = __alloc_iova(mapping, size);
1169 if (dma_addr == DMA_ERROR_CODE)
1170 return dma_addr;
1171
1172 iova = dma_addr;
1173 for (i = 0; i < count; ) {
1174 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1175 phys_addr_t phys = page_to_phys(pages[i]);
1176 unsigned int len, j;
1177
1178 for (j = i + 1; j < count; j++, next_pfn++)
1179 if (page_to_pfn(pages[j]) != next_pfn)
1180 break;
1181
1182 len = (j - i) << PAGE_SHIFT;
1183 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1184 if (ret < 0)
1185 goto fail;
1186 iova += len;
1187 i = j;
1188 }
1189 return dma_addr;
1190 fail:
1191 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1192 __free_iova(mapping, dma_addr, size);
1193 return DMA_ERROR_CODE;
1194 }
1195
1196 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1197 {
1198 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1199
1200 /*
1201 * add optional in-page offset from iova to size and align
1202 * result to page size
1203 */
1204 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1205 iova &= PAGE_MASK;
1206
1207 iommu_unmap(mapping->domain, iova, size);
1208 __free_iova(mapping, iova, size);
1209 return 0;
1210 }
1211
1212 static struct page **__atomic_get_pages(void *addr)
1213 {
1214 struct dma_pool *pool = &atomic_pool;
1215 struct page **pages = pool->pages;
1216 int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
1217
1218 return pages + offs;
1219 }
1220
1221 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
1222 {
1223 struct vm_struct *area;
1224
1225 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1226 return __atomic_get_pages(cpu_addr);
1227
1228 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1229 return cpu_addr;
1230
1231 area = find_vm_area(cpu_addr);
1232 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1233 return area->pages;
1234 return NULL;
1235 }
1236
1237 static void *__iommu_alloc_atomic(struct device *dev, size_t size,
1238 dma_addr_t *handle)
1239 {
1240 struct page *page;
1241 void *addr;
1242
1243 addr = __alloc_from_pool(size, &page);
1244 if (!addr)
1245 return NULL;
1246
1247 *handle = __iommu_create_mapping(dev, &page, size);
1248 if (*handle == DMA_ERROR_CODE)
1249 goto err_mapping;
1250
1251 return addr;
1252
1253 err_mapping:
1254 __free_from_pool(addr, size);
1255 return NULL;
1256 }
1257
1258 static void __iommu_free_atomic(struct device *dev, struct page **pages,
1259 dma_addr_t handle, size_t size)
1260 {
1261 __iommu_remove_mapping(dev, handle, size);
1262 __free_from_pool(page_address(pages[0]), size);
1263 }
1264
1265 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1266 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
1267 {
1268 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
1269 struct page **pages;
1270 void *addr = NULL;
1271
1272 *handle = DMA_ERROR_CODE;
1273 size = PAGE_ALIGN(size);
1274
1275 if (gfp & GFP_ATOMIC)
1276 return __iommu_alloc_atomic(dev, size, handle);
1277
1278 pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
1279 if (!pages)
1280 return NULL;
1281
1282 *handle = __iommu_create_mapping(dev, pages, size);
1283 if (*handle == DMA_ERROR_CODE)
1284 goto err_buffer;
1285
1286 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1287 return pages;
1288
1289 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1290 __builtin_return_address(0));
1291 if (!addr)
1292 goto err_mapping;
1293
1294 return addr;
1295
1296 err_mapping:
1297 __iommu_remove_mapping(dev, *handle, size);
1298 err_buffer:
1299 __iommu_free_buffer(dev, pages, size, attrs);
1300 return NULL;
1301 }
1302
1303 static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1304 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1305 struct dma_attrs *attrs)
1306 {
1307 unsigned long uaddr = vma->vm_start;
1308 unsigned long usize = vma->vm_end - vma->vm_start;
1309 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1310
1311 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1312
1313 if (!pages)
1314 return -ENXIO;
1315
1316 do {
1317 int ret = vm_insert_page(vma, uaddr, *pages++);
1318 if (ret) {
1319 pr_err("Remapping memory failed: %d\n", ret);
1320 return ret;
1321 }
1322 uaddr += PAGE_SIZE;
1323 usize -= PAGE_SIZE;
1324 } while (usize > 0);
1325
1326 return 0;
1327 }
1328
1329 /*
1330 * Free a buffer as defined by the above mapping.
1331 * Must not be called with IRQs disabled.
1332 */
1333 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1334 dma_addr_t handle, struct dma_attrs *attrs)
1335 {
1336 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1337 size = PAGE_ALIGN(size);
1338
1339 if (!pages) {
1340 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1341 return;
1342 }
1343
1344 if (__in_atomic_pool(cpu_addr, size)) {
1345 __iommu_free_atomic(dev, pages, handle, size);
1346 return;
1347 }
1348
1349 if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
1350 unmap_kernel_range((unsigned long)cpu_addr, size);
1351 vunmap(cpu_addr);
1352 }
1353
1354 __iommu_remove_mapping(dev, handle, size);
1355 __iommu_free_buffer(dev, pages, size, attrs);
1356 }
1357
1358 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1359 void *cpu_addr, dma_addr_t dma_addr,
1360 size_t size, struct dma_attrs *attrs)
1361 {
1362 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1363 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1364
1365 if (!pages)
1366 return -ENXIO;
1367
1368 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1369 GFP_KERNEL);
1370 }
1371
1372 /*
1373 * Map a part of the scatter-gather list into contiguous io address space
1374 */
1375 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1376 size_t size, dma_addr_t *handle,
1377 enum dma_data_direction dir, struct dma_attrs *attrs,
1378 bool is_coherent)
1379 {
1380 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1381 dma_addr_t iova, iova_base;
1382 int ret = 0;
1383 unsigned int count;
1384 struct scatterlist *s;
1385
1386 size = PAGE_ALIGN(size);
1387 *handle = DMA_ERROR_CODE;
1388
1389 iova_base = iova = __alloc_iova(mapping, size);
1390 if (iova == DMA_ERROR_CODE)
1391 return -ENOMEM;
1392
1393 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1394 phys_addr_t phys = page_to_phys(sg_page(s));
1395 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1396
1397 if (!is_coherent &&
1398 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1399 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1400
1401 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1402 if (ret < 0)
1403 goto fail;
1404 count += len >> PAGE_SHIFT;
1405 iova += len;
1406 }
1407 *handle = iova_base;
1408
1409 return 0;
1410 fail:
1411 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1412 __free_iova(mapping, iova_base, size);
1413 return ret;
1414 }
1415
1416 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1417 enum dma_data_direction dir, struct dma_attrs *attrs,
1418 bool is_coherent)
1419 {
1420 struct scatterlist *s = sg, *dma = sg, *start = sg;
1421 int i, count = 0;
1422 unsigned int offset = s->offset;
1423 unsigned int size = s->offset + s->length;
1424 unsigned int max = dma_get_max_seg_size(dev);
1425
1426 for (i = 1; i < nents; i++) {
1427 s = sg_next(s);
1428
1429 s->dma_address = DMA_ERROR_CODE;
1430 s->dma_length = 0;
1431
1432 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1433 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
1434 dir, attrs, is_coherent) < 0)
1435 goto bad_mapping;
1436
1437 dma->dma_address += offset;
1438 dma->dma_length = size - offset;
1439
1440 size = offset = s->offset;
1441 start = s;
1442 dma = sg_next(dma);
1443 count += 1;
1444 }
1445 size += s->length;
1446 }
1447 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1448 is_coherent) < 0)
1449 goto bad_mapping;
1450
1451 dma->dma_address += offset;
1452 dma->dma_length = size - offset;
1453
1454 return count+1;
1455
1456 bad_mapping:
1457 for_each_sg(sg, s, count, i)
1458 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1459 return 0;
1460 }
1461
1462 /**
1463 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1464 * @dev: valid struct device pointer
1465 * @sg: list of buffers
1466 * @nents: number of buffers to map
1467 * @dir: DMA transfer direction
1468 *
1469 * Map a set of i/o coherent buffers described by scatterlist in streaming
1470 * mode for DMA. The scatter gather list elements are merged together (if
1471 * possible) and tagged with the appropriate dma address and length. They are
1472 * obtained via sg_dma_{address,length}.
1473 */
1474 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1475 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1476 {
1477 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1478 }
1479
1480 /**
1481 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1482 * @dev: valid struct device pointer
1483 * @sg: list of buffers
1484 * @nents: number of buffers to map
1485 * @dir: DMA transfer direction
1486 *
1487 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1488 * The scatter gather list elements are merged together (if possible) and
1489 * tagged with the appropriate dma address and length. They are obtained via
1490 * sg_dma_{address,length}.
1491 */
1492 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1493 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1494 {
1495 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1496 }
1497
1498 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1499 int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
1500 bool is_coherent)
1501 {
1502 struct scatterlist *s;
1503 int i;
1504
1505 for_each_sg(sg, s, nents, i) {
1506 if (sg_dma_len(s))
1507 __iommu_remove_mapping(dev, sg_dma_address(s),
1508 sg_dma_len(s));
1509 if (!is_coherent &&
1510 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1511 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1512 s->length, dir);
1513 }
1514 }
1515
1516 /**
1517 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1518 * @dev: valid struct device pointer
1519 * @sg: list of buffers
1520 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1521 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1522 *
1523 * Unmap a set of streaming mode DMA translations. Again, CPU access
1524 * rules concerning calls here are the same as for dma_unmap_single().
1525 */
1526 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1527 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1528 {
1529 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1530 }
1531
1532 /**
1533 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1534 * @dev: valid struct device pointer
1535 * @sg: list of buffers
1536 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1537 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1538 *
1539 * Unmap a set of streaming mode DMA translations. Again, CPU access
1540 * rules concerning calls here are the same as for dma_unmap_single().
1541 */
1542 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1543 enum dma_data_direction dir, struct dma_attrs *attrs)
1544 {
1545 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1546 }
1547
1548 /**
1549 * arm_iommu_sync_sg_for_cpu
1550 * @dev: valid struct device pointer
1551 * @sg: list of buffers
1552 * @nents: number of buffers to map (returned from dma_map_sg)
1553 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1554 */
1555 void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1556 int nents, enum dma_data_direction dir)
1557 {
1558 struct scatterlist *s;
1559 int i;
1560
1561 for_each_sg(sg, s, nents, i)
1562 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1563
1564 }
1565
1566 /**
1567 * arm_iommu_sync_sg_for_device
1568 * @dev: valid struct device pointer
1569 * @sg: list of buffers
1570 * @nents: number of buffers to map (returned from dma_map_sg)
1571 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1572 */
1573 void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1574 int nents, enum dma_data_direction dir)
1575 {
1576 struct scatterlist *s;
1577 int i;
1578
1579 for_each_sg(sg, s, nents, i)
1580 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1581 }
1582
1583
1584 /**
1585 * arm_coherent_iommu_map_page
1586 * @dev: valid struct device pointer
1587 * @page: page that buffer resides in
1588 * @offset: offset into page for start of buffer
1589 * @size: size of buffer to map
1590 * @dir: DMA transfer direction
1591 *
1592 * Coherent IOMMU aware version of arm_dma_map_page()
1593 */
1594 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1595 unsigned long offset, size_t size, enum dma_data_direction dir,
1596 struct dma_attrs *attrs)
1597 {
1598 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1599 dma_addr_t dma_addr;
1600 int ret, len = PAGE_ALIGN(size + offset);
1601
1602 dma_addr = __alloc_iova(mapping, len);
1603 if (dma_addr == DMA_ERROR_CODE)
1604 return dma_addr;
1605
1606 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
1607 if (ret < 0)
1608 goto fail;
1609
1610 return dma_addr + offset;
1611 fail:
1612 __free_iova(mapping, dma_addr, len);
1613 return DMA_ERROR_CODE;
1614 }
1615
1616 /**
1617 * arm_iommu_map_page
1618 * @dev: valid struct device pointer
1619 * @page: page that buffer resides in
1620 * @offset: offset into page for start of buffer
1621 * @size: size of buffer to map
1622 * @dir: DMA transfer direction
1623 *
1624 * IOMMU aware version of arm_dma_map_page()
1625 */
1626 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1627 unsigned long offset, size_t size, enum dma_data_direction dir,
1628 struct dma_attrs *attrs)
1629 {
1630 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1631 __dma_page_cpu_to_dev(page, offset, size, dir);
1632
1633 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1634 }
1635
1636 /**
1637 * arm_coherent_iommu_unmap_page
1638 * @dev: valid struct device pointer
1639 * @handle: DMA address of buffer
1640 * @size: size of buffer (same as passed to dma_map_page)
1641 * @dir: DMA transfer direction (same as passed to dma_map_page)
1642 *
1643 * Coherent IOMMU aware version of arm_dma_unmap_page()
1644 */
1645 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1646 size_t size, enum dma_data_direction dir,
1647 struct dma_attrs *attrs)
1648 {
1649 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1650 dma_addr_t iova = handle & PAGE_MASK;
1651 int offset = handle & ~PAGE_MASK;
1652 int len = PAGE_ALIGN(size + offset);
1653
1654 if (!iova)
1655 return;
1656
1657 iommu_unmap(mapping->domain, iova, len);
1658 __free_iova(mapping, iova, len);
1659 }
1660
1661 /**
1662 * arm_iommu_unmap_page
1663 * @dev: valid struct device pointer
1664 * @handle: DMA address of buffer
1665 * @size: size of buffer (same as passed to dma_map_page)
1666 * @dir: DMA transfer direction (same as passed to dma_map_page)
1667 *
1668 * IOMMU aware version of arm_dma_unmap_page()
1669 */
1670 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1671 size_t size, enum dma_data_direction dir,
1672 struct dma_attrs *attrs)
1673 {
1674 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1675 dma_addr_t iova = handle & PAGE_MASK;
1676 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1677 int offset = handle & ~PAGE_MASK;
1678 int len = PAGE_ALIGN(size + offset);
1679
1680 if (!iova)
1681 return;
1682
1683 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1684 __dma_page_dev_to_cpu(page, offset, size, dir);
1685
1686 iommu_unmap(mapping->domain, iova, len);
1687 __free_iova(mapping, iova, len);
1688 }
1689
1690 static void arm_iommu_sync_single_for_cpu(struct device *dev,
1691 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1692 {
1693 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1694 dma_addr_t iova = handle & PAGE_MASK;
1695 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1696 unsigned int offset = handle & ~PAGE_MASK;
1697
1698 if (!iova)
1699 return;
1700
1701 __dma_page_dev_to_cpu(page, offset, size, dir);
1702 }
1703
1704 static void arm_iommu_sync_single_for_device(struct device *dev,
1705 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1706 {
1707 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1708 dma_addr_t iova = handle & PAGE_MASK;
1709 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1710 unsigned int offset = handle & ~PAGE_MASK;
1711
1712 if (!iova)
1713 return;
1714
1715 __dma_page_cpu_to_dev(page, offset, size, dir);
1716 }
1717
1718 struct dma_map_ops iommu_ops = {
1719 .alloc = arm_iommu_alloc_attrs,
1720 .free = arm_iommu_free_attrs,
1721 .mmap = arm_iommu_mmap_attrs,
1722 .get_sgtable = arm_iommu_get_sgtable,
1723
1724 .map_page = arm_iommu_map_page,
1725 .unmap_page = arm_iommu_unmap_page,
1726 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
1727 .sync_single_for_device = arm_iommu_sync_single_for_device,
1728
1729 .map_sg = arm_iommu_map_sg,
1730 .unmap_sg = arm_iommu_unmap_sg,
1731 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
1732 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
1733 };
1734
1735 struct dma_map_ops iommu_coherent_ops = {
1736 .alloc = arm_iommu_alloc_attrs,
1737 .free = arm_iommu_free_attrs,
1738 .mmap = arm_iommu_mmap_attrs,
1739 .get_sgtable = arm_iommu_get_sgtable,
1740
1741 .map_page = arm_coherent_iommu_map_page,
1742 .unmap_page = arm_coherent_iommu_unmap_page,
1743
1744 .map_sg = arm_coherent_iommu_map_sg,
1745 .unmap_sg = arm_coherent_iommu_unmap_sg,
1746 };
1747
1748 /**
1749 * arm_iommu_create_mapping
1750 * @bus: pointer to the bus holding the client device (for IOMMU calls)
1751 * @base: start address of the valid IO address space
1752 * @size: size of the valid IO address space
1753 * @order: granularity of the IO address allocations, in units of 2^order pages
1754 *
1755 * Creates a mapping structure which holds information about used/unused
1756 * IO address ranges, which is required to perform memory allocation and
1757 * mapping with IOMMU aware functions.
1758 *
1759 * The client device needs to be attached to the mapping with the
1760 * arm_iommu_attach_device() function.
1761 */
1762 struct dma_iommu_mapping *
1763 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
1764 int order)
1765 {
1766 unsigned int count = size >> (PAGE_SHIFT + order);
1767 unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
1768 struct dma_iommu_mapping *mapping;
1769 int err = -ENOMEM;
1770
1771 if (!count)
1772 return ERR_PTR(-EINVAL);
1773
1774 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
1775 if (!mapping)
1776 goto err;
1777
1778 mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1779 if (!mapping->bitmap)
1780 goto err2;
1781
1782 mapping->base = base;
1783 mapping->bits = BITS_PER_BYTE * bitmap_size;
1784 mapping->order = order;
1785 spin_lock_init(&mapping->lock);
1786
1787 mapping->domain = iommu_domain_alloc(bus);
1788 if (!mapping->domain)
1789 goto err3;
1790
1791 kref_init(&mapping->kref);
1792 return mapping;
1793 err3:
1794 kfree(mapping->bitmap);
1795 err2:
1796 kfree(mapping);
1797 err:
1798 return ERR_PTR(err);
1799 }
1800
1801 static void release_iommu_mapping(struct kref *kref)
1802 {
1803 struct dma_iommu_mapping *mapping =
1804 container_of(kref, struct dma_iommu_mapping, kref);
1805
1806 iommu_domain_free(mapping->domain);
1807 kfree(mapping->bitmap);
1808 kfree(mapping);
1809 }
1810
1811 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1812 {
1813 if (mapping)
1814 kref_put(&mapping->kref, release_iommu_mapping);
1815 }
1816
1817 /**
1818 * arm_iommu_attach_device
1819 * @dev: valid struct device pointer
1820 * @mapping: io address space mapping structure (returned from
1821 * arm_iommu_create_mapping)
1822 *
1823 * Attaches specified io address space mapping to the provided device,
1824 * this replaces the dma operations (dma_map_ops pointer) with the
1825 * IOMMU aware version. More than one client might be attached to
1826 * the same io address space mapping.
1827 */
1828 int arm_iommu_attach_device(struct device *dev,
1829 struct dma_iommu_mapping *mapping)
1830 {
1831 int err;
1832
1833 err = iommu_attach_device(mapping->domain, dev);
1834 if (err)
1835 return err;
1836
1837 kref_get(&mapping->kref);
1838 dev->archdata.mapping = mapping;
1839 set_dma_ops(dev, &iommu_ops);
1840
1841 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1842 return 0;
1843 }
1844
1845 #endif