arch/arm/plat-omap/iovmm.c
/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)	function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 * 'iova':	device iommu virtual address
 * 'da':	alias of 'iova'
 * 'pa':	physical address
 * 'va':	mpu virtual address
 *
 * 'c': contiguous memory area
 * 'd': discontiguous memory area
 * 'a': anonymous memory allocation
 * '()': optional feature
 *
 * 'n': a normal page(4KB) size is used.
 * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
 *
 * '*': not yet, but feasible.
 */
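
/*
 * In terms of the exported API below, the four patterns correspond to
 * (informal summary; '2' and '4' also allocate the backing memory):
 *
 *	1: iommu_kmap()    / iommu_kunmap()
 *	2: iommu_kmalloc() / iommu_kfree()
 *	3: iommu_vmap()    / iommu_vunmap()
 *	4: iommu_vmalloc() / iommu_vfree()
 */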

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
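
/*
 * Worked example (informal): bytes = SZ_16M + SZ_1M + SZ_4K decomposes
 * into one 16MB entry, one 1MB entry and one 4KB entry, i.e. 3 sg
 * elements, instead of the 4353 a plain 4KB split would need.
 */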

/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);	/* don't leak the header on allocation failure */
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}
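
	/*
	 * Example (informal): a 3MB linear/anonymous request makes
	 * iopgsz_max() return SZ_1M, so 'start' is rounded up to a 1MB
	 * boundary and the mapping can use 1MB iommu sections.
	 */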
	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);	/* drop the lock on both paths */

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
	flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;
	const size_t total = len;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is continuous(linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);

	/* 'len' is zero by now; flush the whole buffer, not zero bytes */
	clean_dcache_area(va, total);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !new || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;	/* otherwise 'err' is returned uninitialized */
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	       const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	       u32 flags)
{
	size_t bytes;
	void *va;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	va = vmap_sg(sgt);
	if (IS_ERR(va))
		return PTR_ERR(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
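
/*
 * Usage sketch (illustrative, not code from this file): 'obj' is the
 * target iommu and 'sgt' a caller-built sg_table whose elements are all
 * io page size aligned; da = 0 requests an anonymous device address:
 *
 *	u32 da;
 *
 *	da = iommu_vmap(obj, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = iommu_vunmap(obj, da);
 */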

/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
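
/*
 * Usage sketch (illustrative, not code from this file): assuming the
 * driver already holds an iommu handle 'obj', a 1MB anonymous
 * allocation and its release would look like:
 *
 *	u32 da;
 *
 *	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_vfree(obj, da);
 */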

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @pa: contiguous physical memory
 * @bytes: length of the region to map
 * @flags: iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	       u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
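
/*
 * Usage sketch (illustrative; 'buf_pa' is a hypothetical physically
 * contiguous region, e.g. a carveout buffer):
 *
 *	u32 da;
 *
 *	da = iommu_kmap(obj, 0, buf_pa, SZ_64K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kunmap(obj, da);
 */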

/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: bytes for allocation
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");