/* linux/drivers/iommu/exynos_iovmm.c
 *
 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/delay.h>

#include <linux/exynos_iovmm.h>
#include <plat/cpu.h>

#include "exynos-iommu.h"

/* IOVM region: [0x10000000, 0xE0000000) */
#define IOVA_START_V6	0x10000000
#define IOVM_SIZE_V6	(0xE0000000 - IOVA_START_V6)
#define sg_physically_continuous(sg) (sg_next(sg) == NULL)

/* alloc_iovm_region - Allocate an IO virtual memory region
 * vmm: virtual memory allocator
 * size: total size to allocate from @vmm.
 * section_offset: page size-aligned offset of the iova start address within
 *	a 1MB boundary. The caller of alloc_iovm_region() obtains the
 *	allocated iova plus section_offset. This is only meaningful for
 *	physically contiguous memory.
 * page_offset: must be smaller than PAGE_SIZE. A value that is simply added
 *	to the allocated virtual address. It affects neither the allocated
 *	size nor the allocated address.
 *
 * This function returns an allocated IO virtual address that satisfies the
 * given constraints: the caller gets the allocated virtual address plus
 * (section_offset + page_offset). The allocation is always SECT_SIZE
 * aligned. Returns 0 if this function is not able to allocate IO virtual
 * memory.
 */
static dma_addr_t alloc_iovm_region(struct exynos_iovmm *vmm, size_t size,
				    size_t section_offset,
				    off_t page_offset)
{
	u32 index = 0;
	u32 vstart;
	u32 vsize;
	unsigned long end, i;
	struct exynos_vm_region *region;
	size_t align = SECT_SIZE;

	BUG_ON(page_offset >= PAGE_SIZE);

	/* pad the request so it cannot overlap a prefetched iovm region */
	vsize = (ALIGN(size + SZ_128K, SZ_128K) + section_offset) >> PAGE_SHIFT;
	align >>= PAGE_SHIFT;
	section_offset >>= PAGE_SHIFT;

	spin_lock(&vmm->bitmap_lock);
again:
	index = find_next_zero_bit(vmm->vm_map[0],
			IOVM_NUM_PAGES(vmm->iovm_size[0]), index);

	if (align) {
		index = ALIGN(index, align);
		if (index >= IOVM_NUM_PAGES(vmm->iovm_size[0])) {
			spin_unlock(&vmm->bitmap_lock);
			return 0;
		}

		if (test_bit(index, vmm->vm_map[0]))
			goto again;
	}

	end = index + vsize;

	if (end >= IOVM_NUM_PAGES(vmm->iovm_size[0])) {
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	i = find_next_bit(vmm->vm_map[0], end, index);
	if (i < end) {
		index = i + 1;
		goto again;
	}

	bitmap_set(vmm->vm_map[0], index, vsize);

	spin_unlock(&vmm->bitmap_lock);

	vstart = (index << PAGE_SHIFT) + vmm->iova_start[0] + page_offset;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (unlikely(!region)) {
		spin_lock(&vmm->bitmap_lock);
		bitmap_clear(vmm->vm_map[0], index, vsize);
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	INIT_LIST_HEAD(&region->node);
	region->start = vstart;
	region->size = vsize << PAGE_SHIFT;
	region->dummy_size = region->size - size;
	region->section_off = section_offset << PAGE_SHIFT;

	spin_lock(&vmm->vmlist_lock);
	list_add_tail(&region->node, &vmm->regions_list);
	vmm->allocated_size[0] += region->size;
	vmm->num_areas[0]++;
	vmm->num_map++;
	spin_unlock(&vmm->vmlist_lock);

	return region->start + region->section_off;
}
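
/*
 * Worked example of the padding arithmetic in alloc_iovm_region() above
 * (illustrative values, assuming 4 KiB pages): for size = 0x101000
 * (1 MiB + 4 KiB) and section_offset = 0, the request is padded to
 * ALIGN(0x101000 + SZ_128K, SZ_128K) = 0x140000, so vsize covers
 * 0x140000 >> PAGE_SHIFT = 320 bitmap pages, and region->dummy_size
 * records the 0x3f000 bytes of padding beyond the requested size.
 */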

struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
					  dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start <= iova &&
		    (region->start + region->size) > iova) {
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static struct exynos_vm_region *remove_iovm_region(struct exynos_iovmm *vmm,
						   dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start + region->section_off == iova) {
			list_del(&region->node);
			vmm->allocated_size[0] -= region->size;
			vmm->num_areas[0]--;
			vmm->num_unmap++;
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static void free_iovm_region(struct exynos_iovmm *vmm,
			     struct exynos_vm_region *region)
{
	if (!region)
		return;

	spin_lock(&vmm->bitmap_lock);
	bitmap_clear(vmm->vm_map[0],
		     (region->start - vmm->iova_start[0]) >> PAGE_SHIFT,
		     region->size >> PAGE_SHIFT);
	spin_unlock(&vmm->bitmap_lock);

	SYSMMU_EVENT_LOG_IOVMM_UNMAP(IOVMM_TO_LOG(vmm),
			region->start, region->start + region->size);

	kfree(region);
}

static dma_addr_t add_iovm_region(struct exynos_iovmm *vmm,
				  dma_addr_t start, size_t size)
{
	struct exynos_vm_region *region, *pos;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return 0;

	INIT_LIST_HEAD(&region->node);
	region->start = start;
	region->size = size;
	/* not kzalloc'ed: clear the offsets explicitly so that
	 * remove_iovm_region() and show_iovm_regions() see sane values */
	region->section_off = 0;
	region->dummy_size = 0;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(pos, &vmm->regions_list, node) {
		if ((start < (pos->start + pos->size)) &&
		    ((start + size) > pos->start)) {
			spin_unlock(&vmm->vmlist_lock);
			kfree(region);
			return 0;
		}
	}

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->vmlist_lock);

	return start;
}

static void show_iovm_regions(struct exynos_iovmm *vmm)
{
	struct exynos_vm_region *pos;

	pr_err("LISTING IOVMM REGIONS...\n");
	spin_lock(&vmm->vmlist_lock);
	list_for_each_entry(pos, &vmm->regions_list, node) {
		pr_err("REGION: %#x (SIZE: %#x, +[%#x, %#x])\n",
			pos->start, pos->size,
			pos->section_off, pos->dummy_size);
	}
	spin_unlock(&vmm->vmlist_lock);
	pr_err("END OF LISTING IOVMM REGIONS...\n");
}

int iovmm_activate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	return iommu_attach_device(vmm->domain, dev);
}

void iovmm_deactivate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	iommu_detach_device(vmm->domain, dev);
}
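
/*
 * Example (illustrative sketch, not part of this driver): a client driver
 * would typically pair these calls in its probe/remove paths; 'pdev' is an
 * assumed platform device.
 *
 *	ret = iovmm_activate(&pdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	iovmm_deactivate(&pdev->dev);
 */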

/* iovmm_map - allocate and map IO virtual memory for the given device
 * dev: device that has an IO virtual address space managed by IOVMM
 * sg: list of physically contiguous memory chunks. For efficient mapping and
 *	performance, each chunk should be larger than the chunks that follow
 *	it in sg. If sg has more than one element, the physical address of
 *	each chunk should be aligned to its size for efficient mapping and
 *	TLB utilization.
 * offset: offset in bytes to be mapped and accessed by dev.
 * size: size in bytes to be mapped and accessed by dev.
 *
 * This function allocates IO virtual memory for the given device and maps the
 * given physical memory conveyed by sg into the allocated IO memory region.
 * Returns the allocated IO virtual address if allocation and mapping succeed.
 * Otherwise, returns a negative error number. The caller must check the
 * return value of this function with IS_ERR_VALUE().
 */
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
		     size_t size, enum dma_data_direction direction, int id)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t section_offset = 0; /* section offset of contig. mem */
	int ret = 0;
	int idx;
	struct scatterlist *tsg;
	struct exynos_vm_region *region;

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	for (; (sg != NULL) && (sg->length < offset); sg = sg_next(sg))
		offset -= sg->length;

	if (sg == NULL) {
		dev_err(dev, "IOVMM: invalid offset to %s.\n", __func__);
		return -EINVAL;
	}

	tsg = sg;

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	if (sg_physically_continuous(sg)) {
		size_t aligned_pad_size;
		phys_addr_t phys = page_to_phys(sg_page(sg));
		section_offset = phys & (~SECT_MASK);
		aligned_pad_size = ALIGN(phys, SECT_SIZE) - phys;
		if ((sg->length - aligned_pad_size) < SECT_SIZE) {
			aligned_pad_size = ALIGN(phys, LPAGE_SIZE) - phys;
			if ((sg->length - aligned_pad_size) >= LPAGE_SIZE)
				section_offset = phys & (~LPAGE_MASK);
			else
				section_offset = 0;
		}
	}
	start = alloc_iovm_region(vmm, size, section_offset, start_off);
	if (!start) {
		spin_lock(&vmm->vmlist_lock);
		dev_err(dev, "%s: Not enough IOVM space to allocate %#zx\n",
			__func__, size);
		dev_err(dev, "%s: Total %#zx, Allocated %#zx, Chunks %d\n",
			__func__, vmm->iovm_size[0],
			vmm->allocated_size[0], vmm->num_areas[0]);
		spin_unlock(&vmm->vmlist_lock);
		ret = -ENOMEM;
		goto err_map_nomem;
	}

	addr = start - start_off;

	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg->length;

		/* if back-to-back sg entries are contiguous, consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg->length == sg_phys(sg_next(sg))) {
			len += sg_next(sg)->length;
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret) {
			dev_err(dev, "iommu_map failed w/ err: %d\n", ret);
			break;
		}

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));

	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		dev_err(dev, "mapped_size(%#zx) is smaller than size(%#zx)\n",
			mapped_size, size);
		if (!ret) {
			dev_err(dev, "ret: %d\n", ret);
			ret = -EINVAL;
		}
		goto err_map_map;
	}

	region = find_iovm_region(vmm, start);
	BUG_ON(!region);

	/*
	 * If the prefetched SLPD is a fault SLPD in zero_l2_table, the FLPD
	 * cache or prefetch buffer caches the address of zero_l2_table.
	 * This function replaces zero_l2_table with a new L2 page table
	 * to write valid mappings.
	 * Accessing the valid area may cause a page fault since the FLPD
	 * cache may still cache zero_l2_table for the valid area instead
	 * of the new L2 page table that holds the mapping information of
	 * the valid area.
	 * Thus any replacement of zero_l2_table with another valid L2 page
	 * table must involve FLPD cache invalidation if the System MMU has
	 * the prefetch feature and an FLPD cache (version 3.3).
	 * FLPD cache invalidation is performed with TLB invalidation by VPN
	 * without blocking. It is safe to invalidate the TLB without
	 * blocking because the target address of the TLB invalidation is
	 * not currently mapped.
	 */

	exynos_sysmmu_tlb_invalidate(vmm->domain, region->start, region->size);

	TRACE_LOG_DEV(dev, "IOVMM: Allocated VM region @ %#x/%#x bytes.\n",
		      start, size);

	SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm), start, start + size,
				   region->size - size);

	return start;

err_map_map:
	iommu_unmap(vmm->domain, start - start_off, mapped_size);
	free_iovm_region(vmm, remove_iovm_region(vmm, start));

	dev_err(dev,
		"Failed(%d) to map IOVMM REGION %pa (SIZE: %#zx, mapped: %#zx)\n",
		ret, &start, size, mapped_size);
	idx = 0;
	do {
		pr_err("SGLIST[%d].size = %#x\n", idx++, tsg->length);
	} while ((tsg = sg_next(tsg)));

	show_iovm_regions(vmm);

err_map_nomem:
	TRACE_LOG_DEV(dev,
		"IOVMM: Failed to allocate VM region for %#x bytes.\n", size);
	return (dma_addr_t)ret;
}

void iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	region = remove_iovm_region(vmm, iova);
	if (region) {
		u32 start = region->start + region->section_off;
		u32 size = region->size - region->dummy_size;

		/* clear page offset */
		if (WARN_ON(start != iova)) {
			dev_err(dev, "IOVMM: "
				"iova %pa and region %#x(+%#x)@%#x(-%#x) mismatch\n",
				&iova, region->size, region->dummy_size,
				region->start, region->section_off);
			show_iovm_regions(vmm);
			/* reinsert iovm region */
			add_iovm_region(vmm, region->start, region->size);
			kfree(region);
			return;
		}
		unmap_size = iommu_unmap(vmm->domain, start & SPAGE_MASK, size);
		if (unlikely(unmap_size != size)) {
			dev_err(dev,
				"Failed to unmap REGION of %#x:\n", start);
			dev_err(dev, "(SIZE: %#x, iova: %pa, unmapped: %#zx)\n",
				size, &iova, unmap_size);
			show_iovm_regions(vmm);
			kfree(region);
			BUG();
			return;
		}

		exynos_sysmmu_tlb_invalidate(vmm->domain, region->start,
					     region->size);

		/* a 60us delay guarantees that any in-flight PTW finishes */
		udelay(60);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
			      unmap_size, iova);
	} else {
		dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
	}
}
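
/*
 * Example usage of iovmm_map()/iovmm_unmap() (illustrative sketch; the sg
 * table 'sgt', device 'dev' and 'buf_size' are assumptions, not part of
 * this driver):
 *
 *	dma_addr_t iova;
 *
 *	iova = iovmm_map(dev, sgt->sgl, 0, buf_size, DMA_TO_DEVICE, 0);
 *	if (IS_ERR_VALUE(iova))
 *		return (int)iova;
 *	...	(the device now accesses [iova, iova + buf_size))
 *	iovmm_unmap(dev, iova);
 */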

int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int ret;

	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	if (WARN_ON((phys + size) >= IOVA_START_V6)) {
		dev_err(dev,
			"Unable to create one to one mapping for %#zx @ %pa\n",
			size, &phys);
		return -EINVAL;
	}

	if (!add_iovm_region(vmm, (dma_addr_t)phys, size))
		return -EADDRINUSE;

	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0)
		free_iovm_region(vmm,
				 remove_iovm_region(vmm, (dma_addr_t)phys));

	return ret;
}

void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());
	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	region = remove_iovm_region(vmm, (dma_addr_t)phys);
	if (region) {
		unmap_size = iommu_unmap(vmm->domain, (dma_addr_t)phys,
					 region->size);
		WARN_ON(unmap_size != region->size);

		exynos_sysmmu_tlb_invalidate(vmm->domain, (dma_addr_t)phys,
					     region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
			      unmap_size, phys);
	}
}
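
/*
 * Example of a one-to-one (iova == phys) mapping for memory that a device
 * must see below IOVA_START_V6 (illustrative sketch; 'fw_base' and
 * 'fw_size' are assumptions):
 *
 *	ret = iovmm_map_oto(dev, fw_base, fw_size);
 *	if (ret)
 *		return ret;
 *	...
 *	iovmm_unmap_oto(dev, fw_base);
 */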

static struct dentry *exynos_iovmm_debugfs_root;
static struct dentry *exynos_iommu_debugfs_root;

static int exynos_iovmm_create_debugfs(void)
{
	exynos_iovmm_debugfs_root = debugfs_create_dir("iovmm", NULL);
	if (!exynos_iovmm_debugfs_root)
		pr_err("IOVMM: Failed to create debugfs entry\n");
	else
		pr_info("IOVMM: Created debugfs entry at debugfs/iovmm\n");

	exynos_iommu_debugfs_root = debugfs_create_dir("iommu", NULL);
	if (!exynos_iommu_debugfs_root)
		pr_err("IOMMU: Failed to create debugfs entry\n");
	else
		pr_info("IOMMU: Created debugfs entry at debugfs/iommu\n");

	return 0;
}
arch_initcall(exynos_iovmm_create_debugfs);

static int iovmm_debug_show(struct seq_file *s, void *unused)
{
	struct exynos_iovmm *vmm = s->private;
	int i = 0;

	seq_printf(s, "%.6s %10s %10s %10s %6s\n",
		   "REGION", "VASTART", "SIZE", "FREE", "CHUNKS");
	seq_puts(s, "---------------------------------------------\n");

	spin_lock(&vmm->vmlist_lock);
	while (i < vmm->inplanes) {
		seq_printf(s, "%3s[%d] %#x %#10zx %#10zx %d\n",
			   "in", i, vmm->iova_start[i], vmm->iovm_size[i],
			   vmm->iovm_size[i] - vmm->allocated_size[i],
			   vmm->num_areas[i]);
		i++;
	}
	while (i < (vmm->inplanes + vmm->onplanes)) {
		seq_printf(s, "%3s[%d] %#x %#10zx %#10zx %d\n",
			   "out", i - vmm->inplanes, vmm->iova_start[i],
			   vmm->iovm_size[i],
			   vmm->iovm_size[i] - vmm->allocated_size[i],
			   vmm->num_areas[i]);
		i++;
	}
	seq_puts(s, "---------------------------------------------\n");
	seq_printf(s, "Total number of mappings  : %d\n", vmm->num_map);
	seq_printf(s, "Total number of unmappings: %d\n", vmm->num_unmap);
	spin_unlock(&vmm->vmlist_lock);

	return 0;
}

static int iovmm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, iovmm_debug_show, inode->i_private);
}

static ssize_t iovmm_debug_write(struct file *filp, const char __user *p,
				 size_t len, loff_t *off)
{
	struct seq_file *s = filp->private_data;
	struct exynos_iovmm *vmm = s->private;

	/* any write simply clears the map/unmap counters in IOVMM */
	spin_lock(&vmm->vmlist_lock);
	vmm->num_map = 0;
	vmm->num_unmap = 0;
	spin_unlock(&vmm->vmlist_lock);

	return len;
}

static const struct file_operations iovmm_debug_fops = {
	.open = iovmm_debug_open,
	.read = seq_read,
	.write = iovmm_debug_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void iovmm_register_debugfs(struct exynos_iovmm *vmm)
{
	if (!exynos_iovmm_debugfs_root)
		return;

	debugfs_create_file(vmm->domain_name, 0664,
			    exynos_iovmm_debugfs_root, vmm, &iovmm_debug_fops);
}
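
/*
 * The node created above can be exercised from userspace (illustrative;
 * the domain name "mfc" is an assumption):
 *
 *	cat /sys/kernel/debug/iovmm/mfc       (dump the per-plane usage table)
 *	echo 0 > /sys/kernel/debug/iovmm/mfc  (reset the map/unmap counters)
 */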

int exynos_create_iovmm(struct device *dev, int inplanes, int onplanes)
{
	return 0;
}

struct exynos_iovmm *exynos_create_single_iovmm(const char *name)
{
	struct exynos_iovmm *vmm;
	int ret = 0;

	vmm = kzalloc(sizeof(*vmm), GFP_KERNEL);
	if (!vmm) {
		ret = -ENOMEM;
		goto err_alloc_vmm;
	}

	vmm->iovm_size[0] = IOVM_SIZE_V6;
	vmm->iova_start[0] = IOVA_START_V6;
	vmm->vm_map[0] = kzalloc(IOVM_BITMAP_SIZE(IOVM_SIZE_V6), GFP_KERNEL);
	if (!vmm->vm_map[0]) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	vmm->inplanes = 1;
	vmm->onplanes = 0;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	ret = exynos_iommu_init_event_log(IOVMM_TO_LOG(vmm), IOVMM_LOG_LEN);
	if (ret)
		goto err_init_event_log;

	iovmm_add_log_to_debugfs(exynos_iovmm_debugfs_root,
				 IOVMM_TO_LOG(vmm), name);

	iommu_add_log_to_debugfs(exynos_iommu_debugfs_root,
				 IOMMU_TO_LOG(vmm->domain), name);

	spin_lock_init(&vmm->vmlist_lock);
	spin_lock_init(&vmm->bitmap_lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	vmm->domain_name = name;

	iovmm_register_debugfs(vmm);

	pr_debug("%s IOVMM: Created %#x B IOVMM from %#x.\n",
		 name, IOVM_SIZE_V6, IOVA_START_V6);
	return vmm;

err_init_event_log:
	iommu_domain_free(vmm->domain);
err_setup_domain:
	kfree(vmm->vm_map[0]);	/* kfree(NULL) is a no-op */
	kfree(vmm);
err_alloc_vmm:
	pr_err("%s IOVMM: Failed to create IOVMM (%d)\n", name, ret);

	return ERR_PTR(ret);
}
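
/*
 * Example of bringing up a single IOVMM instance (illustrative sketch;
 * the name "mydomain" is an assumption):
 *
 *	struct exynos_iovmm *vmm = exynos_create_single_iovmm("mydomain");
 *
 *	if (IS_ERR(vmm))
 *		return PTR_ERR(vmm);
 *	(devices then attach to vmm->domain through iovmm_activate())
 */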

void iovmm_set_fault_handler(struct device *dev,
			     iommu_fault_handler_t handler, void *token)
{
	int ret;

	ret = exynos_sysmmu_add_fault_notifier(dev, handler, token);
	if (ret)
		dev_err(dev, "Failed to register %s's fault notifier\n",
			dev_name(dev));
}