/* linux/drivers/iommu/exynos_iovmm.c
 *
 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/debugfs.h>

#include <linux/exynos_iovmm.h>
#include <plat/cpu.h>

#include "exynos-iommu.h"

#define SZ_768M (SZ_512M + SZ_256M)

static int find_iovm_id(struct exynos_iovmm *vmm,
			struct exynos_vm_region *region)
{
	int i;

	if ((region->start < IOVA_START) ||
		(region->start > (IOVA_START + IOVM_SIZE)))
		return -EINVAL;

	for (i = 0; i < MAX_NUM_PLANE; i++) {
		if (region->start < (vmm->iova_start[i] + vmm->iovm_size[i]))
			return i;
	}
	return -EINVAL;
}

/* alloc_iovm_region - Allocate an IO virtual memory region
 * vmm: virtual memory allocator
 * size: total size to allocate the vm region from @vmm.
 * align: alignment constraint of the allocated virtual address. Must be a
 *	power of 2.
 * max_align: maximum alignment of the allocated virtual address. The
 *	allocated address does not need to satisfy an alignment larger than
 *	max_align.
 * exact_align_mask: constraint for the special case that the allocated
 *	address must satisfy when it is a multiple of align but not of
 *	max_align. If this is not 0, the allocated address must satisfy the
 *	following constraint:
 *	 (allocated address) % max_align == exact_align_mask
 * offset: must be smaller than PAGE_SIZE. Just a value to be added to the
 *	allocated virtual address. It affects neither the allocated size
 *	nor the allocated address.
 *
 * This function returns an allocated IO virtual address that satisfies the
 * given constraints, or 0 if it is unable to allocate IO virtual memory.
 */
static dma_addr_t alloc_iovm_region(struct exynos_iovmm *vmm, size_t size,
			size_t align, size_t max_align,
			size_t exact_align_mask, off_t offset, int id)
{
	dma_addr_t index = 0;
	dma_addr_t vstart;
	size_t vsize;
	unsigned long end, i;
	struct exynos_vm_region *region;

	BUG_ON(align & (align - 1));
	BUG_ON(offset >= PAGE_SIZE);

	/* To avoid allocating prefetched iovm region */
	vsize = ALIGN(size + SZ_256K, SZ_256K) >> PAGE_SHIFT;
	align >>= PAGE_SHIFT;
	exact_align_mask >>= PAGE_SHIFT;
	max_align >>= PAGE_SHIFT;

	spin_lock(&vmm->bitmap_lock);
again:
	index = find_next_zero_bit(vmm->vm_map[id],
			IOVM_NUM_PAGES(vmm->iovm_size[id]), index);

	if (align) {
		if (exact_align_mask) {
			if ((index & ~(align - 1) & (max_align - 1)) >
							exact_align_mask)
				index = ALIGN(index, max_align);
			index |= exact_align_mask;
		} else {
			index = ALIGN(index, align);
		}

		if (index >= IOVM_NUM_PAGES(vmm->iovm_size[id])) {
			spin_unlock(&vmm->bitmap_lock);
			return 0;
		}

		if (test_bit(index, vmm->vm_map[id]))
			goto again;
	}

	end = index + vsize;

	if (end >= IOVM_NUM_PAGES(vmm->iovm_size[id])) {
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	i = find_next_bit(vmm->vm_map[id], end, index);
	if (i < end) {
		index = i + 1;
		goto again;
	}

	bitmap_set(vmm->vm_map[id], index, vsize);

	spin_unlock(&vmm->bitmap_lock);

	vstart = (index << PAGE_SHIFT) + vmm->iova_start[id] + offset;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (unlikely(!region)) {
		spin_lock(&vmm->bitmap_lock);
		bitmap_clear(vmm->vm_map[id], index, vsize);
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	INIT_LIST_HEAD(&region->node);
	region->start = vstart;
	region->size = vsize << PAGE_SHIFT;

	spin_lock(&vmm->vmlist_lock);
	list_add_tail(&region->node, &vmm->regions_list);
	vmm->allocated_size[id] += region->size;
	vmm->num_areas[id]++;
	vmm->num_map++;
	spin_unlock(&vmm->vmlist_lock);

	return region->start;
}
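
/*
 * Worked example of the constraints above (a sketch with assumed values,
 * not part of the driver): for a physically contiguous buffer at
 * phys 0x45673000 of size 0x300000 with max_align = SECT_SIZE (1 MiB),
 * iovmm_map() below derives
 *
 *	align            = 1 << __ffs(0x45673000)       = 0x1000
 *	exact_align_mask = 0x45673000 & (SECT_SIZE - 1) = 0x73000
 *
 * so the call
 *
 *	alloc_iovm_region(vmm, 0x300000, 0x1000, SECT_SIZE, 0x73000, 0, 0);
 *
 * returns an IOVA with (iova % SECT_SIZE) == 0x73000: the IOVA and the
 * physical address share their low 20 bits, which lets the page table
 * still use large pages despite the unaligned start.
 */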

struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
					dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start <= iova &&
			(region->start + region->size) > iova) {
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static struct exynos_vm_region *remove_iovm_region(struct exynos_iovmm *vmm,
					dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start == iova) {
			int id;

			id = find_iovm_id(vmm, region);
			if (id < 0)
				continue;

			list_del(&region->node);
			vmm->allocated_size[id] -= region->size;
			vmm->num_areas[id]--;
			vmm->num_unmap++;
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static void free_iovm_region(struct exynos_iovmm *vmm,
				struct exynos_vm_region *region)
{
	int id;

	if (!region)
		return;

	id = find_iovm_id(vmm, region);
	if (id < 0) {
		kfree(region);
		return;
	}

	spin_lock(&vmm->bitmap_lock);
	bitmap_clear(vmm->vm_map[id],
		(region->start - vmm->iova_start[id]) >> PAGE_SHIFT,
		region->size >> PAGE_SHIFT);
	spin_unlock(&vmm->bitmap_lock);

	SYSMMU_EVENT_LOG_IOVMM_UNMAP(IOVMM_TO_LOG(vmm),
			region->start, region->start + region->size);

	kfree(region);
}

static dma_addr_t add_iovm_region(struct exynos_iovmm *vmm,
					dma_addr_t start, size_t size)
{
	struct exynos_vm_region *region, *pos;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return 0;

	INIT_LIST_HEAD(&region->node);
	region->start = start;
	region->size = size;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(pos, &vmm->regions_list, node) {
		if ((start < (pos->start + pos->size)) &&
					((start + size) > pos->start)) {
			spin_unlock(&vmm->vmlist_lock);
			kfree(region);
			return 0;
		}
	}

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->vmlist_lock);

	return start;
}

static void show_iovm_regions(struct exynos_iovmm *vmm)
{
	struct exynos_vm_region *pos;

	pr_err("LISTING IOVMM REGIONS...\n");
	spin_lock(&vmm->vmlist_lock);
	list_for_each_entry(pos, &vmm->regions_list, node) {
		pr_err("REGION: %pa (SIZE: %#zx)\n", &pos->start, pos->size);
	}
	spin_unlock(&vmm->vmlist_lock);
	pr_err("END OF LISTING IOVMM REGIONS...\n");
}

int iovmm_activate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	return iommu_attach_device(vmm->domain, dev);
}

void iovmm_deactivate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	iommu_detach_device(vmm->domain, dev);
}

/* iovmm_map - allocate and map IO virtual memory for the given device
 * dev: device that has an IO virtual address space managed by IOVMM
 * sg: list of physically contiguous memory chunks. A preceding chunk should
 *	be larger than the chunks that follow it in sg for efficient mapping
 *	and performance. If sg contains more than one element, the physical
 *	address of each chunk should be aligned to its size for efficient
 *	mapping and TLB utilization.
 * offset: offset in bytes to be mapped and accessed by dev.
 * size: size in bytes to be mapped and accessed by dev.
 *
 * This function allocates IO virtual memory for the given device and maps
 * the physical memory described by sg into the allocated IO memory region.
 * It returns the allocated IO virtual address if allocation and mapping
 * succeed, and a negative error number otherwise. The caller must check
 * the return value of this function with IS_ERR_VALUE().
 */
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
		size_t size, enum dma_data_direction direction, int id)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t exact_align_mask = 0;
	size_t max_align, align;
	int ret = 0;
	int idx;
	struct scatterlist *tsg;

	if ((id < 0) || (id >= MAX_NUM_PLANE)) {
		dev_err(dev, "%s: Invalid plane ID %d\n", __func__, id);
		return -EINVAL;
	}

	for (; (sg != NULL) && (sg_dma_len(sg) < offset); sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	if (sg == NULL) {
		dev_err(dev, "IOVMM: invalid offset to %s.\n", __func__);
		return -EINVAL;
	}

	if (direction != DMA_TO_DEVICE)
		id += vmm->inplanes;

	if (id >= (vmm->inplanes + vmm->onplanes)) {
		dev_err(dev, "%s: id(%d) is larger than the number of IOVMs\n",
			__func__, id);
		return -EINVAL;
	}

	tsg = sg;

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	if (size >= SECT_SIZE)
		max_align = SECT_SIZE;
	else if (size < LPAGE_SIZE)
		max_align = SPAGE_SIZE;
	else
		max_align = LPAGE_SIZE;

	if (sg_next(sg) == NULL) {	/* physically contiguous chunk */
		/*
		 * 'align' must be the biggest power of 2 that satisfies:
		 * 'address of physical memory' % 'align' == 0
		 */
		align = 1 << __ffs(page_to_phys(sg_page(sg)));

		exact_align_mask = page_to_phys(sg_page(sg)) & (max_align - 1);

		if ((size - exact_align_mask) < max_align) {
			max_align /= 16;
			exact_align_mask = exact_align_mask & (max_align - 1);
		}

		if (align > max_align)
			align = max_align;

		exact_align_mask &= ~(align - 1);
	} else {
		align = 1 << __ffs(page_to_phys(sg_page(sg)));
		align = min_t(size_t, align, max_align);
		max_align = align;
	}

	start = alloc_iovm_region(vmm, size, align, max_align,
				exact_align_mask, start_off, id);
	if (!start) {
		spin_lock(&vmm->vmlist_lock);
		dev_err(dev,
			"%s: Not enough IOVM space to allocate %#zx/%#zx\n",
			__func__, size, align);
		dev_err(dev,
			"%s: Total %#zx (%d), Allocated %#zx, Chunks %d\n",
			__func__, vmm->iovm_size[id], id,
			vmm->allocated_size[id], vmm->num_areas[id]);
		spin_unlock(&vmm->vmlist_lock);
		ret = -ENOMEM;
		goto err_map_nomem;
	}

	addr = start - start_off;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back-to-back sg entries are contiguous, consolidate them */
		while (sg_next(sg) &&
			sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret) {
			dev_err(dev, "iommu_map failed w/ err: %d\n", ret);
			break;
		}

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));

	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		dev_err(dev, "mapped_size(%#zx) is smaller than size(%#zx)\n",
			mapped_size, size);
		if (!ret) {
			dev_err(dev, "ret: %d\n", ret);
			ret = -EINVAL;
		}
		goto err_map_map;
	}

	TRACE_LOG_DEV(dev, "IOVMM: Allocated VM region @ %pa/%#zx bytes.\n",
			&start, size);

	{
		struct exynos_vm_region *reg = find_iovm_region(vmm, start);
		BUG_ON(!reg);

		SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm),
				start, start + size, reg->size - size);
	}

	return start;

err_map_map:
	iommu_unmap(vmm->domain, start - start_off, mapped_size);
	free_iovm_region(vmm, remove_iovm_region(vmm, start));

	start -= start_off;
	dev_err(dev,
		"Failed(%d) to map IOVMM REGION %pa (SIZE: %#zx, mapped: %#zx)\n",
		ret, &start, size, mapped_size);
	idx = 0;
	do {
		pr_err("SGLIST[%d].size = %#x\n", idx++, tsg->length);
	} while ((tsg = sg_next(tsg)));

	show_iovm_regions(vmm);

err_map_nomem:
	TRACE_LOG_DEV(dev,
		"IOVMM: Failed to allocate a VM region for %#zx bytes.\n",
		size);
	return (dma_addr_t)ret;
}

void iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	region = remove_iovm_region(vmm, iova);
	if (region) {
		if (WARN_ON(region->start != iova)) {
			dev_err(dev,
				"IOVMM: iova %pa and region %#zx @ %pa mismatch\n",
				&iova, region->size, &region->start);
			show_iovm_regions(vmm);
			/* reinsert iovm region */
			add_iovm_region(vmm, region->start, region->size);
			kfree(region);
			return;
		}
		unmap_size = iommu_unmap(vmm->domain, iova & PAGE_MASK,
							region->size);
		if (unlikely(unmap_size != region->size)) {
			dev_err(dev,
				"Failed to unmap IOVMM REGION %pa (SIZE: %#zx, iova: %pa, unmapped: %#zx)\n",
				&region->start, region->size,
				&iova, unmap_size);
			show_iovm_regions(vmm);
			kfree(region);
			BUG();
			return;
		}

		exynos_sysmmu_tlb_invalidate(vmm->domain, region->start,
						region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#zx bytes from %pa.\n",
					unmap_size, &iova);
	} else {
		dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
	}
}
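
/*
 * Usage sketch for iovmm_map()/iovmm_unmap() (illustrative only; the
 * device, scatterlist and plane id are assumptions, not taken from this
 * file):
 */
#if 0
static int example_map_buffer(struct device *dev, struct scatterlist *sg,
				size_t size)
{
	dma_addr_t iova;

	/* Map 'size' bytes from the start of 'sg' on input plane 0. */
	iova = iovmm_map(dev, sg, 0, size, DMA_TO_DEVICE, 0);
	if (IS_ERR_VALUE(iova))
		return (int)iova;	/* negative error number */

	/* ... let the device access [iova, iova + size) ... */

	iovmm_unmap(dev, iova);
	return 0;
}
#endif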

int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int ret;

	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (WARN_ON((phys + size) >= IOVA_START)) {
		dev_err(dev,
			"Unable to create one to one mapping for %#zx @ %pa\n",
			size, &phys);
		return -EINVAL;
	}

	if (!add_iovm_region(vmm, (dma_addr_t)phys, size))
		return -EADDRINUSE;

	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0)
		free_iovm_region(vmm,
			remove_iovm_region(vmm, (dma_addr_t)phys));

	return ret;
}

void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());
	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));

	region = remove_iovm_region(vmm, (dma_addr_t)phys);
	if (region) {
		unmap_size = iommu_unmap(vmm->domain, (dma_addr_t)phys,
							region->size);
		WARN_ON(unmap_size != region->size);

		exynos_sysmmu_tlb_invalidate(vmm->domain, (dma_addr_t)phys,
						region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#zx bytes from %pa.\n",
					unmap_size, &phys);
	}
}
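
/*
 * Usage sketch for the one-to-one mapping API (illustrative only; the
 * firmware region below is an assumption). The region is mapped at an IO
 * virtual address identical to its physical address, so it must end below
 * IOVA_START:
 */
#if 0
static int example_map_firmware(struct device *dev, phys_addr_t fw_phys,
				size_t fw_size)
{
	int ret;

	/* both fw_phys and fw_size must be PAGE_SIZE aligned */
	ret = iovmm_map_oto(dev, fw_phys, fw_size);
	if (ret)
		return ret;

	/* ... the device now sees fw_phys at IOVA == fw_phys ... */

	iovmm_unmap_oto(dev, fw_phys);
	return 0;
}
#endif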

static struct dentry *exynos_iovmm_debugfs_root;
static struct dentry *exynos_iommu_debugfs_root;

static int exynos_iovmm_create_debugfs(void)
{
	exynos_iovmm_debugfs_root = debugfs_create_dir("iovmm", NULL);
	if (!exynos_iovmm_debugfs_root)
		pr_err("IOVMM: Failed to create debugfs entry\n");
	else
		pr_info("IOVMM: Created debugfs entry at debugfs/iovmm\n");

	exynos_iommu_debugfs_root = debugfs_create_dir("iommu", NULL);
	if (!exynos_iommu_debugfs_root)
		pr_err("IOMMU: Failed to create debugfs entry\n");
	else
		pr_info("IOMMU: Created debugfs entry at debugfs/iommu\n");

	return 0;
}
subsys_initcall(exynos_iovmm_create_debugfs);

static int iovmm_debug_show(struct seq_file *s, void *unused)
{
	struct exynos_iovmm *vmm = s->private;
	int i = 0;

	seq_printf(s, "%6s %10s %10s %10s %6s\n",
			"REGION", "VASTART", "SIZE", "FREE", "CHUNKS");
	seq_puts(s, "---------------------------------------------\n");

	spin_lock(&vmm->vmlist_lock);
	while (i < vmm->inplanes) {
		seq_printf(s, "%3s[%d] %pa %#10zx %#10zx %d\n",
			"in", i, &vmm->iova_start[i], vmm->iovm_size[i],
			vmm->iovm_size[i] - vmm->allocated_size[i],
			vmm->num_areas[i]);
		i++;
	}
	while (i < (vmm->inplanes + vmm->onplanes)) {
		seq_printf(s, "%3s[%d] %pa %#10zx %#10zx %d\n",
			"out", i - vmm->inplanes, &vmm->iova_start[i],
			vmm->iovm_size[i],
			vmm->iovm_size[i] - vmm->allocated_size[i],
			vmm->num_areas[i]);
		i++;
	}
	seq_puts(s, "---------------------------------------------\n");
	seq_printf(s, "Total number of mappings  : %d\n", vmm->num_map);
	seq_printf(s, "Total number of unmappings: %d\n", vmm->num_unmap);
	spin_unlock(&vmm->vmlist_lock);

	return 0;
}
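
/*
 * With the format above, reading debugfs/iovmm/<device name> produces
 * output shaped roughly like this (values are illustrative only):
 *
 *	REGION    VASTART       SIZE       FREE CHUNKS
 *	---------------------------------------------
 *	 in[0] 0x80000000  0x8000000  0x7f00000      2
 *	out[0] 0x88000000  0x4000000  0x4000000      0
 *	---------------------------------------------
 *	Total number of mappings  : 3
 *	Total number of unmappings: 1
 */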

static int iovmm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, iovmm_debug_show, inode->i_private);
}

static ssize_t iovmm_debug_write(struct file *filp, const char __user *p,
				size_t len, loff_t *off)
{
	struct seq_file *s = filp->private_data;
	struct exynos_iovmm *vmm = s->private;

	/* any write clears the map/unmap counters of the IOVMM */
	spin_lock(&vmm->vmlist_lock);
	vmm->num_map = 0;
	vmm->num_unmap = 0;
	spin_unlock(&vmm->vmlist_lock);

	return len;
}

static const struct file_operations iovmm_debug_fops = {
	.open = iovmm_debug_open,
	.read = seq_read,
	.write = iovmm_debug_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void iovmm_register_debugfs(struct exynos_iovmm *vmm)
{
	if (!exynos_iovmm_debugfs_root)
		return;

	debugfs_create_file(dev_name(vmm->dev), 0664,
			exynos_iovmm_debugfs_root, vmm, &iovmm_debug_fops);
}

int exynos_create_iovmm(struct device *dev, int inplanes, int onplanes)
{
	static unsigned long iovmcfg[MAX_NUM_PLANE][MAX_NUM_PLANE] = {
		{IOVM_SIZE, 0, 0, 0, 0, 0},
		{SZ_2G, IOVM_SIZE - SZ_2G, 0, 0, 0, 0},
		{SZ_1G + SZ_256M, SZ_1G, SZ_1G, 0, 0, 0},
		{SZ_1G, SZ_768M, SZ_768M, SZ_768M, 0, 0},
		{SZ_1G, SZ_768M, SZ_512M, SZ_768M, SZ_256M, 0},
		{SZ_1G, SZ_512M, SZ_256M, SZ_768M, SZ_512M, SZ_256M},
	};
	int i, nplanes, ret = 0;
	size_t sum_iovm = 0;
	struct exynos_iovmm *vmm;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;

	nplanes = inplanes + onplanes;
	if (WARN_ON(!owner) || nplanes > MAX_NUM_PLANE || nplanes < 1) {
		ret = -ENOSYS;
		goto err_alloc_vmm;
	}

	if (owner->vmm_data)
		return 0;

	vmm = kzalloc(sizeof(*vmm), GFP_KERNEL);
	if (!vmm) {
		ret = -ENOMEM;
		goto err_alloc_vmm;
	}

	for (i = 0; i < nplanes; i++) {
		vmm->iovm_size[i] = iovmcfg[nplanes - 1][i];
		vmm->iova_start[i] = IOVA_START + sum_iovm;
		vmm->vm_map[i] = kzalloc(IOVM_BITMAP_SIZE(vmm->iovm_size[i]),
					GFP_KERNEL);
		if (!vmm->vm_map[i]) {
			ret = -ENOMEM;
			goto err_setup_domain;
		}
		sum_iovm += iovmcfg[nplanes - 1][i];
		dev_info(dev, "IOVMM: IOVM SIZE = %#zx B, IOVMM from %pa.\n",
				vmm->iovm_size[i], &vmm->iova_start[i]);
	}

	vmm->inplanes = inplanes;
	vmm->onplanes = onplanes;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	ret = exynos_iommu_init_event_log(IOVMM_TO_LOG(vmm), IOVMM_LOG_LEN);
	if (ret)
		goto err_init_event_log;

	iovmm_add_log_to_debugfs(exynos_iovmm_debugfs_root,
			IOVMM_TO_LOG(vmm), dev_name(dev));

	iommu_add_log_to_debugfs(exynos_iommu_debugfs_root,
			IOMMU_TO_LOG(vmm->domain), dev_name(dev));

	spin_lock_init(&vmm->vmlist_lock);
	spin_lock_init(&vmm->bitmap_lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	vmm->dev = dev;
	owner->vmm_data = vmm;

	iovmm_register_debugfs(vmm);

	dev_dbg(dev, "IOVMM: Created %#x B IOVMM from %#x.\n",
			IOVM_SIZE, IOVA_START);
	return 0;

err_init_event_log:
	iommu_domain_free(vmm->domain);
err_setup_domain:
	for (i = 0; i < nplanes; i++)
		kfree(vmm->vm_map[i]);
	kfree(vmm);
err_alloc_vmm:
	dev_err(dev, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}
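
/*
 * Example (derived from iovmcfg above): exynos_create_iovmm(dev, 2, 1)
 * gives nplanes == 3 and selects the row {SZ_1G + SZ_256M, SZ_1G, SZ_1G}:
 *
 *	plane 0 (in) : starts at IOVA_START,  size SZ_1G + SZ_256M
 *	plane 1 (in) : end of plane 0,        size SZ_1G
 *	plane 2 (out): end of plane 1,        size SZ_1G
 *
 * iovmm_map() adds vmm->inplanes to 'id' when direction != DMA_TO_DEVICE,
 * so callers address the output plane above as id 0 with such a direction.
 */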

void iovmm_set_fault_handler(struct device *dev,
			iommu_fault_handler_t handler, void *token)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	iommu_set_fault_handler(vmm->domain, handler, token);
}