/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                    mapping         iommu_          page
 *    | da      pa      va      (d)-(p)-(v)     function        type
 *  ---------------------------------------------------------------------------
 *  1 | c       c       c        1 - 1 - 1      _kmap() / _kunmap()     s
 *  2 | c       c,a     c        1 - 1 - 1      _kmalloc()/ _kfree()    s
 *  3 | c       d       c        1 - n - 1      _vmap() / _vunmap()     s
 *  4 | c       d,a     c        1 - n - 1      _vmalloc()/ _vfree()    n*
 *
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */
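
/*
 * Example (editor's sketch, not part of the original driver): typical
 * lifecycle of an anonymous 1-n-1 mapping (pattern 4 above).  Here
 * iommu_get()/iommu_put() are assumed to be provided by the plat-omap
 * iommu core, and "isp" is just an illustrative iommu name:
 *
 *      struct iommu *obj = iommu_get("isp");
 *      u32 da;
 *
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *      if (!IS_ERR_VALUE(da)) {
 *              ... program the device with 'da', then tear down ...
 *              iommu_vfree(obj, da);
 *      }
 *      iommu_put(obj);
 */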

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
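
/*
 * Worked example (editor's note): for 17MB + 64KB the loop above yields
 * one 16MB entry, one 1MB entry and one 64KB entry, so sgtable_nents()
 * returns 3, where a plain 4KB split would need 4368 entries.
 */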

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
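
/*
 * Worked example (editor's note): with existing iovmas [0x1000,0x3000)
 * and [0x100000,0x200000), an anonymous request for 0x2000 bytes starts
 * at 0x1000 (the first page is reserved for NULL), does not fit before
 * the first area, is bumped to roundup(0x3000, PAGE_SIZE) = 0x3000,
 * and lands in the hole before 0x100000, giving [0x3000,0x5000).
 */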

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
	flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;
	const size_t total = len;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);

	/* 'len' was consumed above, so flush the whole buffer length */
	clean_dcache_area(va, total);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !new || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	va = vmap_sg(sgt);
	if (IS_ERR(va))
		return PTR_ERR(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
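
/*
 * Example (editor's sketch, not from the original code): mapping a
 * scatter gather table the driver already owns.  'sgt' is assumed to be
 * a caller-built table whose entries were filled with
 * sg_set_page(sg, page, PAGE_SIZE, 0); every segment must be exactly
 * one 4KB page, since vmap_sg() only handles single pages:
 *
 *      u32 da = iommu_vmap(obj, 0, sgt, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ... let the device use 'da', then ...
 *              sgt = iommu_vunmap(obj, da);
 *      }
 *      sg_free_table(sgt);
 *      kfree(sgt);
 */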

/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			  size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @pa: contiguous physical memory
 * @bytes: bytes for mapping
 * @flags: iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
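
/*
 * Example (editor's sketch): making a physically contiguous region,
 * here a made-up 1MB frame buffer at 'fb_pa', visible to the device.
 * With an anonymous 'da' and a 1MB-aligned 'fb_pa', a single 1MB
 * superpage entry is used:
 *
 *      u32 da = iommu_kmap(obj, 0, fb_pa, SZ_1M, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ... let the device access 'da', then ...
 *              iommu_kunmap(obj, da);
 *      }
 */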

/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: bytes for allocation
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
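
/*
 * Example (editor's sketch): a device that expects its buffer at a
 * fixed address can pass a non-zero, page aligned 'da', which selects
 * IOVMF_DA_FIXED instead of IOVMF_DA_ANON.  Per the FIXME in
 * sgtable_alloc(), fixed-'da' regions are built from 4KB pages only,
 * so sizes that would be carved into superpages (64KB and up) are
 * better mapped anonymously for now:
 *
 *      u32 da = iommu_kmalloc(obj, 0x20000000, SZ_8K, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ...
 *              iommu_kfree(obj, da);
 *      }
 */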

/**
 * iommu_kfree - release memory allocated by 'iommu_kmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");