/* linux/drivers/iommu/exynos_iovmm.c
 *
 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/debugfs.h>

#include <linux/exynos_iovmm.h>

#include "exynos-iommu.h"

#define SZ_768M		(SZ_512M + SZ_256M)

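/* find_iovm_id - find the IOVM plane that contains @region
 * vmm: virtual memory allocator
 * region: IO virtual memory region to look up
 *
 * Returns the index of the plane whose address range contains
 * @region->start, or -EINVAL if the region lies outside
 * [IOVA_START, IOVA_START + IOVM_SIZE].
 */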
static int find_iovm_id(struct exynos_iovmm *vmm,
			struct exynos_vm_region *region)
{
	int i;

	if ((region->start < IOVA_START) ||
		(region->start > (IOVA_START + IOVM_SIZE)))
		return -EINVAL;

	for (i = 0; i < MAX_NUM_PLANE; i++) {
		if (region->start < (vmm->iova_start[i] + vmm->iovm_size[i]))
			return i;
	}

	return -EINVAL;
}

/* alloc_iovm_region - Allocate IO virtual memory region
 * vmm: virtual memory allocator
 * size: total size to allocate vm region from @vmm.
 * align: alignment constraints of the allocated virtual address
 * max_align: maximum alignment of the allocated virtual address. The
 *          allocated address does not need to satisfy alignment larger
 *          than max_align.
 * exact_align_mask: constraint for the special case that the allocated
 *          address must satisfy when it is a multiple of align but not of
 *          max_align. If this is not 0, the allocated address must satisfy
 *          the following condition:
 *           ((allocated address) % max_align) / align = exact_align_mask
 * offset: must be smaller than PAGE_SIZE. Just a value to be added to the
 *         allocated virtual address. This does not affect the allocated
 *         size of IO virtual memory.
 *
 * This function returns an allocated IO virtual address that satisfies the
 * given constraints. Returns 0 if this function is not able to allocate IO
 * virtual memory.
 */
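/*
 * Worked example with illustrative values (not taken from the original
 * source): with align = SZ_64K and max_align = SZ_1M, passing
 * exact_align_mask = 0xC0000 requests an address addr whose offset within
 * its 1 MiB block is 0xC0000, i.e. addr % SZ_1M == 0xC0000 (in units of
 * align, (addr % SZ_1M) / SZ_64K == 0xC0000 / SZ_64K == 12, matching the
 * formula above). iovmm_map() derives exact_align_mask from the physical
 * address of the buffer so that the IO virtual address inherits the
 * physical sub-section alignment, which lets the System MMU use the
 * largest possible page mappings.
 */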
static dma_addr_t alloc_iovm_region(struct exynos_iovmm *vmm, size_t size,
				size_t align, size_t max_align,
				size_t exact_align_mask,
				off_t offset, int id)
{
	unsigned long index = 0;
	unsigned long end, i;
	u32 vstart;
	u32 vsize;
	struct exynos_vm_region *region;

	BUG_ON(align & (align - 1));
	BUG_ON(offset >= PAGE_SIZE);

	/* To avoid allocating prefetched iovm region */
	vsize = ALIGN(size + SZ_256K, SZ_256K) >> PAGE_SHIFT;
	align >>= PAGE_SHIFT;
	exact_align_mask >>= PAGE_SHIFT;
	max_align >>= PAGE_SHIFT;

	spin_lock(&vmm->bitmap_lock);
again:
	index = find_next_zero_bit(vmm->vm_map[id],
			IOVM_NUM_PAGES(vmm->iovm_size[id]), index);

	if (exact_align_mask) {
		if ((index & ~(align - 1) & (max_align - 1)) >
							exact_align_mask)
			index = ALIGN(index, max_align);
		index |= exact_align_mask;
	} else {
		index = ALIGN(index, align);
	}

	if (index >= IOVM_NUM_PAGES(vmm->iovm_size[id])) {
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	if (test_bit(index, vmm->vm_map[id]))
		goto again;

	end = index + vsize;

	if (end >= IOVM_NUM_PAGES(vmm->iovm_size[id])) {
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	i = find_next_bit(vmm->vm_map[id], end, index);
	if (i < end) {
		index = i + 1;
		goto again;
	}

	bitmap_set(vmm->vm_map[id], index, vsize);

	spin_unlock(&vmm->bitmap_lock);

	vstart = (index << PAGE_SHIFT) + vmm->iova_start[id] + offset;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (unlikely(!region)) {
		spin_lock(&vmm->bitmap_lock);
		bitmap_clear(vmm->vm_map[id], index, vsize);
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	INIT_LIST_HEAD(&region->node);
	region->start = vstart;
	region->size = vsize << PAGE_SHIFT;

	spin_lock(&vmm->vmlist_lock);
	list_add_tail(&region->node, &vmm->regions_list);
	vmm->allocated_size[id] += region->size;
	vmm->num_areas[id]++;
	vmm->num_map++;
	spin_unlock(&vmm->vmlist_lock);

	return region->start;
}

struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
						dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start <= iova &&
			(region->start + region->size) > iova) {
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static struct exynos_vm_region *remove_iovm_region(struct exynos_iovmm *vmm,
							dma_addr_t iova)
{
	struct exynos_vm_region *region;
	int id;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start == iova) {
			id = find_iovm_id(vmm, region);
			if (id < 0)
				break;

			list_del(&region->node);
			vmm->allocated_size[id] -= region->size;
			vmm->num_areas[id]--;
			vmm->num_unmap++;
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static void free_iovm_region(struct exynos_iovmm *vmm,
				struct exynos_vm_region *region)
{
	int id;

	if (!region)
		return;

	id = find_iovm_id(vmm, region);
	if (id < 0) {
		kfree(region);
		return;
	}

	spin_lock(&vmm->bitmap_lock);
	bitmap_clear(vmm->vm_map[id],
			(region->start - vmm->iova_start[id]) >> PAGE_SHIFT,
			region->size >> PAGE_SHIFT);
	spin_unlock(&vmm->bitmap_lock);

	SYSMMU_EVENT_LOG_IOVMM_UNMAP(IOVMM_TO_LOG(vmm),
			region->start, region->start + region->size);

	kfree(region);
}

static dma_addr_t add_iovm_region(struct exynos_iovmm *vmm,
					dma_addr_t start, size_t size)
{
	struct exynos_vm_region *region, *pos;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return 0;

	INIT_LIST_HEAD(&region->node);
	region->start = start;
	region->size = size;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(pos, &vmm->regions_list, node) {
		if ((start < (pos->start + pos->size)) &&
			((start + size) > pos->start)) {
			spin_unlock(&vmm->vmlist_lock);
			kfree(region);
			return 0;
		}
	}

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->vmlist_lock);

	return start;
}

static void show_iovm_regions(struct exynos_iovmm *vmm)
{
	struct exynos_vm_region *pos;

	pr_err("LISTING IOVMM REGIONS...\n");
	spin_lock(&vmm->vmlist_lock);
	list_for_each_entry(pos, &vmm->regions_list, node) {
		pr_err("REGION: %#x (SIZE: %#x)\n", pos->start, pos->size);
	}
	spin_unlock(&vmm->vmlist_lock);
	pr_err("END OF LISTING IOVMM REGIONS...\n");
}

int iovmm_activate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	return iommu_attach_device(vmm->domain, dev);
}

void iovmm_deactivate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	iommu_detach_device(vmm->domain, dev);
}

/* iovmm_map - allocate and map IO virtual memory for the given device
 * dev: device that has IO virtual address space managed by IOVMM
 * sg: list of physically contiguous memory chunks. The preceding chunk
 *     needs to be larger than the following chunks in sg for efficient
 *     mapping and performance. If there is more than one element in sg,
 *     the physical address of each chunk needs to be aligned by its size
 *     for efficient mapping and TLB utilization.
 * offset: offset in bytes to be mapped and accessed by dev.
 * size: size in bytes to be mapped and accessed by dev.
 *
 * This function allocates IO virtual memory for the given device and maps
 * the given physical memory conveyed by sg into the allocated IO memory
 * region. Returns the allocated IO virtual address if allocation and
 * mapping succeed. Otherwise, a negative error number. The caller must
 * check the return value of this function with IS_ERR_VALUE().
 */
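/*
 * Typical call sequence (illustrative sketch only; 'sgt' as a previously
 * built struct sg_table, 'buf_size' and plane id 0 are assumptions, not
 * taken from the original source):
 *
 *	dma_addr_t iova;
 *	int ret;
 *
 *	ret = iovmm_activate(dev);
 *	if (ret)
 *		return ret;
 *	iova = iovmm_map(dev, sgt->sgl, 0, buf_size, DMA_TO_DEVICE, 0);
 *	if (IS_ERR_VALUE(iova))
 *		return (int)iova;
 *	...let the device perform DMA through 'iova'...
 *	iovmm_unmap(dev, iova);
 */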
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
		size_t size, enum dma_data_direction direction, int id)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t exact_align_mask = 0;
	size_t max_align, align;
	int ret = 0;
	int idx;
	struct scatterlist *tsg;

	if ((id < 0) || (id >= MAX_NUM_PLANE)) {
		dev_err(dev, "%s: Invalid plane ID %d\n", __func__, id);
		return -EINVAL;
	}

	for (; (sg != NULL) && (sg_dma_len(sg) < offset); sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	if (sg == NULL) {
		dev_err(dev, "IOVMM: invalid offset to %s.\n", __func__);
		return -EINVAL;
	}

	tsg = sg;

	if (direction != DMA_TO_DEVICE)
		id += vmm->inplanes;

	if (id >= (vmm->inplanes + vmm->onplanes)) {
		dev_err(dev, "%s: id(%d) is larger than the number of IOVMs\n",
				__func__, id);
		return -EINVAL;
	}

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	if (size >= SECT_SIZE)
		max_align = SECT_SIZE;
	else if (size < LPAGE_SIZE)
		max_align = SPAGE_SIZE;
	else
		max_align = LPAGE_SIZE;

	if (sg_next(sg) == NULL) {	/* physically contiguous chunk */
		/*
		 * 'align' must be the biggest 2^n that satisfies:
		 * 'address of physical memory' % 'align' = 0
		 */
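		/*
		 * Example with illustrative numbers (not from the original
		 * source): a chunk starting at physical address 0x20C4000
		 * yields align = 1 << __ffs(0x20C4000) = 0x4000 (16 KiB).
		 */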
		align = 1 << __ffs(page_to_phys(sg_page(sg)));

		exact_align_mask = page_to_phys(sg_page(sg)) & (max_align - 1);

		if ((size - exact_align_mask) < max_align) {
			max_align /= 2;
			exact_align_mask = exact_align_mask & (max_align - 1);
		}

		if (align > max_align)
			align = max_align;

		exact_align_mask &= ~(align - 1);
	} else {
		align = 1 << __ffs(page_to_phys(sg_page(sg)));
		align = min_t(size_t, align, max_align);
	}

	start = alloc_iovm_region(vmm, size, align, max_align,
					exact_align_mask, start_off, id);
	if (!start) {
		spin_lock(&vmm->vmlist_lock);
		dev_err(dev,
			"%s: Not enough IOVM space to allocate %#zx/%#zx\n",
			__func__, size, align);
		dev_err(dev,
			"%s: Total %#zx (%d), Allocated %#zx , Chunks %d\n",
			__func__, vmm->iovm_size[id], id,
			vmm->allocated_size[id], vmm->num_areas[id]);
		spin_unlock(&vmm->vmlist_lock);
		ret = -ENOMEM;
		goto err_map_nomem;
	}

	addr = start - start_off;

	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
			sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret) {
			dev_err(dev, "iommu_map failed w/ err: %d\n", ret);
			break;
		}

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));

	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		dev_err(dev, "mapped_size(%#zx) is smaller than size(%#zx)\n",
				mapped_size, size);
		dev_err(dev, "ret: %d\n", ret);
		if (!ret)
			ret = -EINVAL;
		goto err_map_map;
	}

	TRACE_LOG_DEV(dev, "IOVMM: Allocated VM region @ %#zx/%#zx bytes.\n",
							start, size);

	{
		struct exynos_vm_region *reg = find_iovm_region(vmm, start);

		BUG_ON(!reg);
		SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm),
				start, start + size, reg->size - size);
	}

	return start;

err_map_map:
	iommu_unmap(vmm->domain, start - start_off, mapped_size);
	free_iovm_region(vmm, remove_iovm_region(vmm, start));

	dev_err(dev,
		"Failed(%d) to map IOVMM REGION %pa (SIZE: %#zx, mapped: %#zx)\n",
		ret, &start, size, mapped_size);
	idx = 0;
	do {
		pr_err("SGLIST[%d].size = %#x\n", idx++, tsg->length);
	} while ((tsg = sg_next(tsg)));

	show_iovm_regions(vmm);

err_map_nomem:
	TRACE_LOG_DEV(dev,
		"IOVMM: Failed to allocate VM region for %#zx bytes.\n", size);
	return (dma_addr_t)ret;
}

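/* iovmm_unmap - release IO virtual memory mapped by iovmm_map()
 * dev: device that has IO virtual address space managed by IOVMM
 * iova: IO virtual address returned by iovmm_map()
 *
 * This function removes the vm region starting at @iova from the regions
 * list, unmaps it from the IOMMU page table, invalidates the TLB of the
 * System MMU, and returns the IO virtual range to the allocator. It must
 * not be called from IRQ context.
 */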
void iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	region = remove_iovm_region(vmm, iova);
	if (region) {
		if (WARN_ON(region->start != iova)) {
			dev_err(dev,
				"IOVMM: iova %pa and region %#x @ %#x mismatch\n",
				&iova, region->size, region->start);
			show_iovm_regions(vmm);
			/* reinsert iovm region */
			add_iovm_region(vmm, region->start, region->size);
			kfree(region);
			return;
		}

		unmap_size = iommu_unmap(vmm->domain, iova & PAGE_MASK,
							region->size);
		if (unlikely(unmap_size != region->size)) {
			dev_err(dev, "Failed to unmap IOVMM REGION %#x "
					"(SIZE: %#x, iova: %pa, unmapped: %#zx)\n",
					region->start,
					region->size, &iova, unmap_size);
			show_iovm_regions(vmm);
			kfree(region);
			return;
		}

		exynos_sysmmu_tlb_invalidate(vmm->domain, region->start,
							region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
						unmap_size, iova);
	} else {
		dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
	}
}

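/* iovmm_map_oto - create a one-to-one mapping for the given physical memory
 * dev: device that has IO virtual address space managed by IOVMM
 * phys: physical address to map. Must be page-aligned, and (phys + size)
 *       must be lower than IOVA_START so the identity-mapped range cannot
 *       collide with addresses handed out by alloc_iovm_region().
 * size: size of the mapping in bytes. Must be page-aligned.
 *
 * Maps @phys at the IO virtual address of the same value, so the device
 * accesses the buffer at its physical address. Returns 0 on success,
 * otherwise a negative error number.
 */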
int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int ret;

	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (WARN_ON((phys + size) >= IOVA_START)) {
		dev_err(dev,
			"Unable to create one to one mapping for %#zx @ %pa\n",
			size, &phys);
		return -EINVAL;
	}

	if (!add_iovm_region(vmm, (dma_addr_t)phys, size))
		return -EADDRINUSE;

	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0)
		free_iovm_region(vmm,
				remove_iovm_region(vmm, (dma_addr_t)phys));

	return ret;
}

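/* iovmm_unmap_oto - release a one-to-one mapping created by iovmm_map_oto()
 * dev: device that has IO virtual address space managed by IOVMM
 * phys: page-aligned physical address that was passed to iovmm_map_oto()
 *
 * This function must not be called from IRQ context.
 */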
void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());
	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));

	region = remove_iovm_region(vmm, (dma_addr_t)phys);
	if (region) {
		unmap_size = iommu_unmap(vmm->domain, (dma_addr_t)phys,
							region->size);
		WARN_ON(unmap_size != region->size);

		exynos_sysmmu_tlb_invalidate(vmm->domain, (dma_addr_t)phys,
							region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
						unmap_size, phys);
	}
}

static struct dentry *exynos_iovmm_debugfs_root;
static struct dentry *exynos_iommu_debugfs_root;

static int exynos_iovmm_create_debugfs(void)
{
	exynos_iovmm_debugfs_root = debugfs_create_dir("iovmm", NULL);
	if (!exynos_iovmm_debugfs_root)
		pr_err("IOVMM: Failed to create debugfs entry\n");
	else
		pr_info("IOVMM: Created debugfs entry at debugfs/iovmm\n");

	exynos_iommu_debugfs_root = debugfs_create_dir("iommu", NULL);
	if (!exynos_iommu_debugfs_root)
		pr_err("IOMMU: Failed to create debugfs entry\n");
	else
		pr_info("IOMMU: Created debugfs entry at debugfs/iommu\n");

	return 0;
}
subsys_initcall(exynos_iovmm_create_debugfs);

static int iovmm_debug_show(struct seq_file *s, void *unused)
{
	struct exynos_iovmm *vmm = s->private;
	int i = 0;

	seq_printf(s, "%6s %10s %10s %10s %6s\n",
			"REGION", "VASTART", "SIZE", "FREE", "CHUNKS");
	seq_puts(s, "---------------------------------------------\n");

	spin_lock(&vmm->vmlist_lock);
	while (i < vmm->inplanes) {
		seq_printf(s, "%3s[%d] %pa %#10zx %#10zx %d\n",
				"in", i, &vmm->iova_start[i], vmm->iovm_size[i],
				vmm->iovm_size[i] - vmm->allocated_size[i],
				vmm->num_areas[i]);
		i++;
	}
	while (i < (vmm->inplanes + vmm->onplanes)) {
		seq_printf(s, "%3s[%d] %pa %#10zx %#10zx %d\n",
				"out", i - vmm->inplanes, &vmm->iova_start[i],
				vmm->iovm_size[i],
				vmm->iovm_size[i] - vmm->allocated_size[i],
				vmm->num_areas[i]);
		i++;
	}
	seq_puts(s, "---------------------------------------------\n");
	seq_printf(s, "Total number of mappings  : %d\n", vmm->num_map);
	seq_printf(s, "Total number of unmappings: %d\n", vmm->num_unmap);
	spin_unlock(&vmm->vmlist_lock);

	return 0;
}

static int iovmm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, iovmm_debug_show, inode->i_private);
}

static ssize_t iovmm_debug_write(struct file *filp, const char __user *p,
					size_t len, loff_t *off)
{
	struct seq_file *s = filp->private_data;
	struct exynos_iovmm *vmm = s->private;

	/* clears the map count in IOVMM */
	spin_lock(&vmm->vmlist_lock);
	vmm->num_map = 0;
	vmm->num_unmap = 0;
	spin_unlock(&vmm->vmlist_lock);

	return len;
}

static const struct file_operations iovmm_debug_fops = {
	.open = iovmm_debug_open,
	.read = seq_read,
	.write = iovmm_debug_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void iovmm_register_debugfs(struct exynos_iovmm *vmm)
{
	if (!exynos_iovmm_debugfs_root)
		return;

	debugfs_create_file(dev_name(vmm->dev), 0664,
			exynos_iovmm_debugfs_root, vmm, &iovmm_debug_fops);
}

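/* exynos_create_iovmm - create an IO virtual memory allocator for a device
 * dev: device that will own the new IOVMM
 * inplanes: number of input planes of the device
 * onplanes: number of output planes of the device
 *
 * The total IO virtual address space of IOVM_SIZE is divided among
 * nplanes = inplanes + onplanes planes: row (nplanes - 1) of the iovmcfg[]
 * table below gives the size of each plane. For example, a device with
 * 3 planes gets 1.25 GiB, 1 GiB and 1 GiB of IO virtual space.
 * Returns 0 on success, otherwise a negative error number.
 */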
int exynos_create_iovmm(struct device *dev, int inplanes, int onplanes)
{
	static unsigned long iovmcfg[MAX_NUM_PLANE][MAX_NUM_PLANE] = {
		{IOVM_SIZE, 0, 0, 0, 0, 0},
		{SZ_2G, IOVM_SIZE - SZ_2G, 0, 0, 0, 0},
		{SZ_1G + SZ_256M, SZ_1G, SZ_1G, 0, 0, 0},
		{SZ_1G, SZ_768M, SZ_768M, SZ_768M, 0, 0},
		{SZ_1G, SZ_768M, SZ_512M, SZ_768M, SZ_256M, 0},
		{SZ_1G, SZ_512M, SZ_256M, SZ_768M, SZ_512M, SZ_256M},
	};

	int i, nplanes, ret = 0;
	size_t sum_iovm = 0;
	struct exynos_iovmm *vmm;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;

	nplanes = inplanes + onplanes;
	if (WARN_ON(!owner) || nplanes > MAX_NUM_PLANE || nplanes < 1) {
		ret = -EINVAL;
		goto err_alloc_vmm;
	}

	vmm = kzalloc(sizeof(*vmm), GFP_KERNEL);
	if (!vmm) {
		ret = -ENOMEM;
		goto err_alloc_vmm;
	}

	for (i = 0; i < nplanes; i++) {
		vmm->iovm_size[i] = iovmcfg[nplanes - 1][i];
		vmm->iova_start[i] = IOVA_START + sum_iovm;
		vmm->vm_map[i] = kzalloc(IOVM_BITMAP_SIZE(vmm->iovm_size[i]),
					GFP_KERNEL);
		if (!vmm->vm_map[i]) {
			ret = -ENOMEM;
			goto err_setup_domain;
		}
		sum_iovm += iovmcfg[nplanes - 1][i];
		dev_info(dev, "IOVMM: IOVM SIZE = %#zx B, IOVMM from %pa.\n",
				vmm->iovm_size[i], &vmm->iova_start[i]);
	}

	vmm->inplanes = inplanes;
	vmm->onplanes = onplanes;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	ret = exynos_iommu_init_event_log(IOVMM_TO_LOG(vmm), IOVMM_LOG_LEN);
	if (!ret) {
		iovmm_add_log_to_debugfs(exynos_iovmm_debugfs_root,
					IOVMM_TO_LOG(vmm), dev_name(dev));
		iommu_add_log_to_debugfs(exynos_iommu_debugfs_root,
					IOMMU_TO_LOG(vmm->domain),
					dev_name(dev));
	} else {
		goto err_init_event_log;
	}

	spin_lock_init(&vmm->vmlist_lock);
	spin_lock_init(&vmm->bitmap_lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	vmm->dev = dev;
	owner->vmm_data = vmm;

	iovmm_register_debugfs(vmm);

	dev_dbg(dev, "IOVMM: Created %#x B IOVMM from %#x.\n",
			IOVM_SIZE, IOVA_START);
	return 0;

err_init_event_log:
	iommu_domain_free(vmm->domain);
err_setup_domain:
	for (i = 0; i < nplanes; i++)
		kfree(vmm->vm_map[i]);
	kfree(vmm);
err_alloc_vmm:
	dev_err(dev, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}

void iovmm_set_fault_handler(struct device *dev,
				iommu_fault_handler_t handler, void *token)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	iommu_set_fault_handler(vmm->domain, handler, token);
}