ion: silence logical-not-parentheses warning
[exynos8895/android_kernel_samsung_universal8895.git] drivers/staging/android/ion/ion.c
1 /*
2 *
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 #include <linux/device.h>
19 #include <linux/atomic.h>
20 #include <linux/err.h>
21 #include <linux/file.h>
22 #include <linux/freezer.h>
23 #include <linux/fs.h>
24 #include <linux/anon_inodes.h>
25 #include <linux/kthread.h>
26 #include <linux/list.h>
27 #include <linux/memblock.h>
28 #include <linux/miscdevice.h>
29 #include <linux/export.h>
30 #include <linux/mm.h>
31 #include <linux/mm_types.h>
32 #include <linux/rbtree.h>
33 #include <linux/slab.h>
34 #include <linux/seq_file.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/debugfs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/idr.h>
40 #include <linux/exynos_iovmm.h>
41 #include <linux/exynos_ion.h>
42 #include <linux/highmem.h>
43
44 #include "ion.h"
45 #include <asm/cacheflush.h>
46 #include <asm/tlbflush.h>
47
48 #define CREATE_TRACE_POINTS
49 #include "ion_priv.h"
50 #include "compat_ion.h"
51
52 /**
53 * struct ion_device - the metadata of the ion device node
54 * @dev: the actual misc device
55 * @buffers: an rb tree of all the existing buffers
56 * @buffer_lock: lock protecting the tree of buffers
57 * @lock: rwsem protecting the tree of heaps and clients
58 * @heaps: list of all the heaps in the system
59 * @clients: an rb tree of all the existing clients
60 */
61 struct ion_device {
62 struct miscdevice dev;
63 struct rb_root buffers;
64 struct mutex buffer_lock;
65 struct rw_semaphore lock;
66 struct plist_head heaps;
67 long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
68 unsigned long arg);
69 struct rb_root clients;
70 struct dentry *debug_root;
71 struct dentry *heaps_debug_root;
72 struct dentry *clients_debug_root;
73
74 #ifdef CONFIG_ION_EXYNOS_STAT_LOG
75 /* event log */
76 struct dentry *buffer_debug_file;
77 struct dentry *event_debug_file;
78 struct ion_eventlog eventlog[ION_EVENT_LOG_MAX];
79 atomic_t event_idx;
80 #endif
81 };
82
83 /**
84 * struct ion_client - a process/hw block local address space
85 * @node: node in the tree of all clients
86 * @dev: backpointer to ion device
87 * @handles: an rb tree of all the handles in this client
88 * @idr: an idr space for allocating handle ids
89 * @lock: lock protecting the tree of handles
90 * @name: used for debugging
91 * @display_name: used for debugging (unique version of @name)
92 * @display_serial: used for debugging (to make display_name unique)
93 * @task: used for debugging
94 *
95 * A client represents a list of buffers this client may access.
96 * The mutex stored here is used to protect both the tree of handles
97 * and the handles themselves, and should be held while modifying either.
98 */
99 struct ion_client {
100 struct rb_node node;
101 struct ion_device *dev;
102 struct rb_root handles;
103 struct idr idr;
104 struct mutex lock;
105 const char *name;
106 char *display_name;
107 int display_serial;
108 struct task_struct *task;
109 pid_t pid;
110 struct dentry *debug_root;
111 };
112
113 /**
114 * ion_handle - a client local reference to a buffer
115 * @ref: reference count
116 * @client: back pointer to the client the buffer resides in
117 * @buffer: pointer to the buffer
118 * @node: node in the client's handle rbtree
119 * @kmap_cnt: count of times this client has mapped to kernel
120 * @id: client-unique id allocated by client->idr
121 *
122 * Modifications to node or kmap_cnt should be protected by the
123 * lock in the client. Other fields are never changed after initialization.
124 */
125 struct ion_handle {
126 struct kref ref;
127 unsigned int user_ref_count;
128 struct ion_client *client;
129 struct ion_buffer *buffer;
130 struct rb_node node;
131 unsigned int kmap_cnt;
132 int id;
133 };
134
135 struct ion_device *g_idev;
136
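/*
 * The least significant bit of each buffer->pages entry is used as a
 * per-page dirty tag for buffers with faulted user mappings; the helpers
 * below set, test and clear that bit.
 */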
137 static inline struct page *ion_buffer_page(struct page *page)
138 {
139 return (struct page *)((unsigned long)page & ~(1UL));
140 }
141
142 static inline bool ion_buffer_page_is_dirty(struct page *page)
143 {
144 return !!((unsigned long)page & 1UL);
145 }
146
147 static inline void ion_buffer_page_dirty(struct page **page)
148 {
149 *page = (struct page *)((unsigned long)(*page) | 1UL);
150 }
151
152 static inline void ion_buffer_page_clean(struct page **page)
153 {
154 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
155 }
156
157 void ion_debug_heap_usage_show(struct ion_heap *heap)
158 {
159 struct scatterlist *sg;
160 struct sg_table *table;
161 struct rb_node *n;
162 struct page *page;
163 struct ion_device *dev = heap->dev;
164 int i;
165 ion_phys_addr_t paddr;
166
167 /* show the usage only for contiguous buffers */
168 if ((heap->type != ION_HEAP_TYPE_CARVEOUT)
169 && (heap->type != ION_HEAP_TYPE_DMA))
170 return;
171
172 pr_err("[HEAP %16s (id %4d) DETAIL USAGE]\n", heap->name, heap->id);
173
174 mutex_lock(&dev->buffer_lock);
175 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
176 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
177 node);
178 if (buffer->heap->id != heap->id)
179 continue;
180 table = buffer->sg_table;
181 for_each_sg(table->sgl, sg, table->nents, i) {
182 page = sg_page(sg);
183 paddr = PFN_PHYS(page_to_pfn(page));
184 pr_err("[%16lx--%16lx] %16zu\n",
185 paddr, paddr + sg->length, buffer->size);
186 }
187 }
188 mutex_unlock(&dev->buffer_lock);
189 }
190
191 #ifdef CONFIG_ION_EXYNOS_STAT_LOG
192 static inline void ION_EVENT_ALLOC(struct ion_buffer *buffer, ktime_t begin)
193 {
194 struct ion_device *dev = buffer->dev;
195 int idx = atomic_inc_return(&dev->event_idx);
196 struct ion_eventlog *log = &dev->eventlog[idx % ION_EVENT_LOG_MAX];
197 struct ion_event_alloc *data = &log->data.alloc;
198
199 log->type = ION_EVENT_TYPE_ALLOC;
200 log->begin = begin;
201 log->done = ktime_get();
202 data->id = buffer;
203 data->heap = buffer->heap;
204 data->size = buffer->size;
205 data->flags = buffer->flags;
206 }
207
208 static inline void ION_EVENT_FREE(struct ion_buffer *buffer, ktime_t begin)
209 {
210 struct ion_device *dev = buffer->dev;
211 int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
212 struct ion_eventlog *log = &dev->eventlog[idx];
213 struct ion_event_free *data = &log->data.free;
214
215 log->type = ION_EVENT_TYPE_FREE;
216 log->begin = begin;
217 log->done = ktime_get();
218 data->id = buffer;
219 data->heap = buffer->heap;
220 data->size = buffer->size;
221 data->shrinker = (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
222 }
223
224 static inline void ION_EVENT_MMAP(struct ion_buffer *buffer, ktime_t begin)
225 {
226 struct ion_device *dev = buffer->dev;
227 int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
228 struct ion_eventlog *log = &dev->eventlog[idx];
229 struct ion_event_mmap *data = &log->data.mmap;
230
231 log->type = ION_EVENT_TYPE_MMAP;
232 log->begin = begin;
233 log->done = ktime_get();
234 data->id = buffer;
235 data->heap = buffer->heap;
236 data->size = buffer->size;
237 }
238
239 void ION_EVENT_SHRINK(struct ion_device *dev, size_t size)
240 {
241 int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
242 struct ion_eventlog *log = &dev->eventlog[idx];
243
244 log->type = ION_EVENT_TYPE_SHRINK;
245 log->begin = ktime_get();
246 log->done = ktime_set(0, 0);
247 log->data.shrink.size = size;
248 }
249
250 void ION_EVENT_CLEAR(struct ion_buffer *buffer, ktime_t begin)
251 {
252 struct ion_device *dev = buffer->dev;
253 int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
254 struct ion_eventlog *log = &dev->eventlog[idx];
255 struct ion_event_clear *data = &log->data.clear;
256
257 log->type = ION_EVENT_TYPE_CLEAR;
258 log->begin = begin;
259 log->done = ktime_get();
260 data->id = buffer;
261 data->heap = buffer->heap;
262 data->size = buffer->size;
263 data->flags = buffer->flags;
264 }
265
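/*
 * Each buffer keeps a list of the "master" devices that currently reference
 * it (the ion device itself and any dma-buf attachments), used by the
 * Exynos ION statistics log.
 */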
266 static struct ion_task *ion_buffer_task_lookup(struct ion_buffer *buffer,
267 struct device *master)
268 {
269 bool found = false;
270 struct ion_task *task;
271
272 list_for_each_entry(task, &buffer->master_list, list) {
273 if (task->master == master) {
274 found = true;
275 break;
276 }
277 }
278
279 return found ? task : NULL;
280 }
281
282 static void ion_buffer_set_task_info(struct ion_buffer *buffer)
283 {
284 INIT_LIST_HEAD(&buffer->master_list);
285 get_task_comm(buffer->task_comm, current->group_leader);
286 get_task_comm(buffer->thread_comm, current);
287 buffer->pid = task_pid_nr(current->group_leader);
288 buffer->tid = task_pid_nr(current);
289 }
290
291 static void ion_buffer_task_add(struct ion_buffer *buffer,
292 struct device *master)
293 {
294 struct ion_task *task;
295
296 task = ion_buffer_task_lookup(buffer, master);
297 if (!task) {
298 task = kzalloc(sizeof(*task), GFP_KERNEL);
299 if (task) {
300 task->master = master;
301 kref_init(&task->ref);
302 list_add_tail(&task->list, &buffer->master_list);
303 }
304 } else {
305 kref_get(&task->ref);
306 }
307 }
308
309 static void ion_buffer_task_add_lock(struct ion_buffer *buffer,
310 struct device *master)
311 {
312 mutex_lock(&buffer->lock);
313 ion_buffer_task_add(buffer, master);
314 mutex_unlock(&buffer->lock);
315 }
316
317 static void __ion_buffer_task_remove(struct kref *kref)
318 {
319 struct ion_task *task = container_of(kref, struct ion_task, ref);
320
321 list_del(&task->list);
322 kfree(task);
323 }
324
325 static void ion_buffer_task_remove(struct ion_buffer *buffer,
326 struct device *master)
327 {
328 struct ion_task *task, *tmp;
329
330 list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
331 if (task->master == master) {
332 kref_put(&task->ref, __ion_buffer_task_remove);
333 break;
334 }
335 }
336 }
337
338 static void ion_buffer_task_remove_lock(struct ion_buffer *buffer,
339 struct device *master)
340 {
341 mutex_lock(&buffer->lock);
342 ion_buffer_task_remove(buffer, master);
343 mutex_unlock(&buffer->lock);
344 }
345
346 static void ion_buffer_task_remove_all(struct ion_buffer *buffer)
347 {
348 struct ion_task *task, *tmp;
349
350 mutex_lock(&buffer->lock);
351 list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
352 list_del(&task->list);
353 kfree(task);
354 }
355 mutex_unlock(&buffer->lock);
356 }
357 #else
358 #define ION_EVENT_ALLOC(buffer, begin) do { } while (0)
359 #define ION_EVENT_FREE(buffer, begin) do { } while (0)
360 #define ION_EVENT_MMAP(buffer, begin) do { } while (0)
361 #define ion_buffer_set_task_info(buffer) do { } while (0)
362 #define ion_buffer_task_add(buffer, master) do { } while (0)
363 #define ion_buffer_task_add_lock(buffer, master) do { } while (0)
364 #define ion_buffer_task_remove(buffer, master) do { } while (0)
365 #define ion_buffer_task_remove_lock(buffer, master) do { } while (0)
366 #define ion_buffer_task_remove_all(buffer) do { } while (0)
367 #endif
368
369 /* this function should only be called while dev->lock is held */
370 static void ion_buffer_add(struct ion_device *dev,
371 struct ion_buffer *buffer)
372 {
373 struct rb_node **p = &dev->buffers.rb_node;
374 struct rb_node *parent = NULL;
375 struct ion_buffer *entry;
376
377 while (*p) {
378 parent = *p;
379 entry = rb_entry(parent, struct ion_buffer, node);
380
381 if (buffer < entry) {
382 p = &(*p)->rb_left;
383 } else if (buffer > entry) {
384 p = &(*p)->rb_right;
385 } else {
386 pr_err("%s: buffer already found.", __func__);
387 BUG();
388 }
389 }
390
391 rb_link_node(&buffer->node, parent, p);
392 rb_insert_color(&buffer->node, &dev->buffers);
393
394 ion_buffer_set_task_info(buffer);
395 ion_buffer_task_add(buffer, dev->dev.this_device);
396 }
397
398 /* this function should only be called while dev->lock is held */
399 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
400 struct ion_device *dev,
401 unsigned long len,
402 unsigned long align,
403 unsigned long flags)
404 {
405 struct ion_buffer *buffer;
406 struct sg_table *table;
407 struct scatterlist *sg;
408 int i, ret;
409
410 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
411 if (!buffer)
412 return ERR_PTR(-ENOMEM);
413
414 buffer->heap = heap;
415 buffer->flags = flags;
416 buffer->size = len;
417 kref_init(&buffer->ref);
418
419 ret = heap->ops->allocate(heap, buffer, len, align, flags);
420
421 if (ret) {
422 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
423 goto err2;
424
425 ion_heap_freelist_drain(heap, 0);
426 ret = heap->ops->allocate(heap, buffer, len, align,
427 flags);
428 if (ret)
429 goto err2;
430 }
431
432 buffer->dev = dev;
433
434 table = heap->ops->map_dma(heap, buffer);
435 if (WARN_ONCE(table == NULL,
436 "heap->ops->map_dma should return ERR_PTR on error"))
437 table = ERR_PTR(-EINVAL);
438 if (IS_ERR(table)) {
439 ret = -EINVAL;
440 goto err1;
441 }
442
443 buffer->sg_table = table;
444 if (ion_buffer_fault_user_mappings(buffer)) {
445 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
446 struct scatterlist *sg;
447 int i, j, k = 0;
448
449 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
450 if (!buffer->pages) {
451 ret = -ENOMEM;
452 goto err;
453 }
454
455 for_each_sg(table->sgl, sg, table->nents, i) {
456 struct page *page = sg_page(sg);
457
458 for (j = 0; j < sg->length / PAGE_SIZE; j++)
459 buffer->pages[k++] = page++;
460 }
461 }
462
463 buffer->dev = dev;
464 buffer->size = len;
465 INIT_LIST_HEAD(&buffer->vmas);
466 INIT_LIST_HEAD(&buffer->iovas);
467 mutex_init(&buffer->lock);
468 /*
469 * this will set up dma addresses for the sglist -- it is not
470 * technically correct as per the dma api -- a specific
471 * device isn't really taking ownership here. However, in practice on
472 * our systems the only dma_address space is physical addresses.
473 * Additionally, we can't afford the overhead of invalidating every
474 * allocation via dma_map_sg. The implicit contract here is that
475 * memory coming from the heaps is ready for dma, i.e. if it has a
476 * cached mapping, that mapping has already been invalidated.
477 */
478 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
479 sg_dma_address(sg) = sg_phys(sg);
480 sg_dma_len(sg) = sg->length;
481 }
482 mutex_lock(&dev->buffer_lock);
483 ion_buffer_add(dev, buffer);
484 mutex_unlock(&dev->buffer_lock);
485 return buffer;
486
487 err:
488 heap->ops->unmap_dma(heap, buffer);
489 err1:
490 heap->ops->free(buffer);
491 err2:
492 kfree(buffer);
493 return ERR_PTR(ret);
494 }
495
496 void ion_buffer_destroy(struct ion_buffer *buffer)
497 {
498 struct ion_iovm_map *iovm_map;
499 struct ion_iovm_map *tmp;
500
501 ION_EVENT_BEGIN();
502 trace_ion_free_start((unsigned long) buffer, buffer->size,
503 buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
504
505 if (WARN_ON(buffer->kmap_cnt > 0))
506 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
507
508 list_for_each_entry_safe(iovm_map, tmp, &buffer->iovas, list) {
509 iovmm_unmap(iovm_map->dev, iovm_map->iova);
510 list_del(&iovm_map->list);
511 kfree(iovm_map);
512 }
513
514 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
515 buffer->heap->ops->free(buffer);
516 vfree(buffer->pages);
517
518 ion_buffer_task_remove_all(buffer);
519 ION_EVENT_FREE(buffer, ION_EVENT_DONE());
520 trace_ion_free_end((unsigned long) buffer, buffer->size,
521 buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
522 kfree(buffer);
523 }
524
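/*
 * kref release callback: unlink the buffer from the device's rbtree, then
 * either queue it on the heap's deferred-free list or destroy it directly.
 */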
525 static void _ion_buffer_destroy(struct kref *kref)
526 {
527 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
528 struct ion_heap *heap = buffer->heap;
529 struct ion_device *dev = buffer->dev;
530
531 mutex_lock(&dev->buffer_lock);
532 rb_erase(&buffer->node, &dev->buffers);
533 mutex_unlock(&dev->buffer_lock);
534
535 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
536 ion_heap_freelist_add(heap, buffer);
537 else
538 ion_buffer_destroy(buffer);
539 }
540
541 static void ion_buffer_get(struct ion_buffer *buffer)
542 {
543 kref_get(&buffer->ref);
544 }
545
546 static int ion_buffer_put(struct ion_buffer *buffer)
547 {
548 return kref_put(&buffer->ref, _ion_buffer_destroy);
549 }
550
551 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
552 {
553 mutex_lock(&buffer->lock);
554 buffer->handle_count++;
555 mutex_unlock(&buffer->lock);
556 }
557
558 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
559 {
560 /*
561 * when a buffer is removed from a handle, if it is not in
562 * any other handles, copy the taskcomm and the pid of the
563 * process it's being removed from into the buffer. At this
564 * point there will be no way to track what processes this buffer is
565 * being used by, as it only exists as a dma_buf file descriptor.
566 * The taskcomm and pid can provide a debug hint as to where this fd
567 * is in the system
568 */
569 mutex_lock(&buffer->lock);
570 buffer->handle_count--;
571 BUG_ON(buffer->handle_count < 0);
572 if (!buffer->handle_count) {
573 struct task_struct *task;
574
575 task = current->group_leader;
576 get_task_comm(buffer->task_comm, task);
577 buffer->pid = task_pid_nr(task);
578 }
579 mutex_unlock(&buffer->lock);
580 }
581
582 static bool ion_handle_validate(struct ion_client *client,
583 struct ion_handle *handle)
584 {
585 WARN_ON(!mutex_is_locked(&client->lock));
586 return idr_find(&client->idr, handle->id) == handle;
587 }
588
589 static struct ion_handle *ion_handle_create(struct ion_client *client,
590 struct ion_buffer *buffer)
591 {
592 struct ion_handle *handle;
593
594 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
595 if (!handle)
596 return ERR_PTR(-ENOMEM);
597 kref_init(&handle->ref);
598 RB_CLEAR_NODE(&handle->node);
599 handle->client = client;
600 ion_buffer_get(buffer);
601 ion_buffer_add_to_handle(buffer);
602 handle->buffer = buffer;
603
604 return handle;
605 }
606
607 static void ion_handle_kmap_put(struct ion_handle *);
608
609 static void ion_handle_destroy(struct kref *kref)
610 {
611 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
612 struct ion_client *client = handle->client;
613 struct ion_buffer *buffer = handle->buffer;
614
615 mutex_lock(&buffer->lock);
616 while (handle->kmap_cnt)
617 ion_handle_kmap_put(handle);
618 mutex_unlock(&buffer->lock);
619
620 idr_remove(&client->idr, handle->id);
621 if (!RB_EMPTY_NODE(&handle->node))
622 rb_erase(&handle->node, &client->handles);
623
624 ion_buffer_remove_from_handle(buffer);
625 ion_buffer_put(buffer);
626
627 kfree(handle);
628 }
629
630 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
631 {
632 return handle->buffer;
633 }
634
635 static void ion_handle_get(struct ion_handle *handle)
636 {
637 kref_get(&handle->ref);
638 }
639
640 /* Must hold the client lock */
641 static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
642 {
643 if (atomic_read(&handle->ref.refcount) + 1 == 0)
644 return ERR_PTR(-EOVERFLOW);
645 ion_handle_get(handle);
646 return handle;
647 }
648
649 static int ion_handle_put_nolock(struct ion_handle *handle)
650 {
651 return kref_put(&handle->ref, ion_handle_destroy);
652 }
653
654 int ion_handle_put(struct ion_handle *handle)
655 {
656 int ret;
657
658 mutex_lock(&handle->client->lock);
659 if (!ion_handle_validate(handle->client, handle)) {
660 WARN(1, "%s: invalid handle passed to free.\n", __func__);
661 mutex_unlock(&handle->client->lock);
662 return -EINVAL;
663 }
664
665 ret = ion_handle_put_nolock(handle);
666 mutex_unlock(&handle->client->lock);
667
668 return ret;
669 }
670
671 /* Must hold the client lock */
672 static void user_ion_handle_get(struct ion_handle *handle)
673 {
674 if (handle->user_ref_count++ == 0) {
675 kref_get(&handle->ref);
676 }
677 }
678
679 /* Must hold the client lock */
680 static struct ion_handle* user_ion_handle_get_check_overflow(struct ion_handle *handle)
681 {
682 if (handle->user_ref_count + 1 == 0)
683 return ERR_PTR(-EOVERFLOW);
684 user_ion_handle_get(handle);
685 return handle;
686 }
687
688 /* passes a kref to the user ref count.
689 * We know we're holding a kref to the object before and
690 * after this call, so no need to reverify handle. */
691 static struct ion_handle* pass_to_user(struct ion_handle *handle)
692 {
693 struct ion_client *client = handle->client;
694 struct ion_handle *ret;
695
696 mutex_lock(&client->lock);
697 ret = user_ion_handle_get_check_overflow(handle);
698 ion_handle_put_nolock(handle);
699 mutex_unlock(&client->lock);
700 return ret;
701 }
702
703 /* Must hold the client lock */
704 static int user_ion_handle_put_nolock(struct ion_handle *handle)
705 {
706 int ret = 0;
707
708 if (--handle->user_ref_count == 0) {
709 ret = ion_handle_put_nolock(handle);
710 }
711
712 return ret;
713 }
714
715 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
716 struct ion_buffer *buffer)
717 {
718 struct rb_node *n = client->handles.rb_node;
719
720 while (n) {
721 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
722
723 if (buffer < entry->buffer)
724 n = n->rb_left;
725 else if (buffer > entry->buffer)
726 n = n->rb_right;
727 else
728 return entry;
729 }
730 return ERR_PTR(-EINVAL);
731 }
732
733 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
734 int id)
735 {
736 struct ion_handle *handle;
737
738 handle = idr_find(&client->idr, id);
739 if (handle)
740 return ion_handle_get_check_overflow(handle);
741
742 return ERR_PTR(-EINVAL);
743 }
744
745 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
746 int id)
747 {
748 struct ion_handle *handle;
749
750 mutex_lock(&client->lock);
751 handle = ion_handle_get_by_id_nolock(client, id);
752 mutex_unlock(&client->lock);
753
754 return handle;
755 }
756
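/* Allocate an id for the handle and insert it into the client's handle rbtree. */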
757 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
758 {
759 int id;
760 struct rb_node **p = &client->handles.rb_node;
761 struct rb_node *parent = NULL;
762 struct ion_handle *entry;
763
764 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
765 if (id < 0) {
766 pr_err("%s: Fail to get bad id (ret %d)\n", __func__, id);
767 return id;
768 }
769
770 handle->id = id;
771
772 while (*p) {
773 parent = *p;
774 entry = rb_entry(parent, struct ion_handle, node);
775
776 if (handle->buffer < entry->buffer)
777 p = &(*p)->rb_left;
778 else if (handle->buffer > entry->buffer)
779 p = &(*p)->rb_right;
780 else
781 WARN(1, "%s: buffer already found.", __func__);
782 }
783
784 rb_link_node(&handle->node, parent, p);
785 rb_insert_color(&handle->node, &client->handles);
786
787 return 0;
788 }
789
790 unsigned int ion_parse_heap_id(unsigned int heap_id_mask, unsigned int flags);
791
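/* Sum the sizes of all buffers currently owned by the client's pid. */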
792 static size_t ion_buffer_get_total_size_by_pid(struct ion_client *client)
793 {
794 struct ion_device *dev = client->dev;
795 pid_t pid = client->pid;
796 size_t pid_total_size = 0;
797 struct rb_node *n;
798
799 mutex_lock(&dev->buffer_lock);
800 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
801 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
802 node);
803 mutex_lock(&buffer->lock);
804 if (pid == buffer->pid)
805 pid_total_size += buffer->size;
806 mutex_unlock(&buffer->lock);
807 }
808 mutex_unlock(&dev->buffer_lock);
809
810 return pid_total_size;
811 }
812
813 static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
814 size_t align, unsigned int heap_id_mask,
815 unsigned int flags, bool grab_handle)
816 {
817 struct ion_handle *handle;
818 struct ion_device *dev = client->dev;
819 struct ion_buffer *buffer = NULL;
820 struct ion_heap *heap;
821 int ret;
822
823 ION_EVENT_BEGIN();
824 trace_ion_alloc_start(client->name, 0, len, align, heap_id_mask, flags);
825
826 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
827 len, align, heap_id_mask, flags);
828 /*
829 * traverse the list of heaps available in this system in priority
830 * order. If the heap type is supported by the client and matches the
831 * request of the caller, allocate from it. Repeat until the allocation
832 * succeeds or all heaps have been tried.
833 */
834 len = PAGE_ALIGN(len);
835 if (WARN_ON(!len)) {
836 trace_ion_alloc_fail(client->name, EINVAL, len,
837 align, heap_id_mask, flags);
838 return ERR_PTR(-EINVAL);
839 }
840
841 if (len / PAGE_SIZE > totalram_pages / 4) {
842 size_t pid_total_size = ion_buffer_get_total_size_by_pid(client);
843
844 if ((len + pid_total_size) / PAGE_SIZE > totalram_pages / 2) {
845 pr_err("%s: len %zu total %zu heap_id_mask %u flags %x\n",
846 __func__, len, pid_total_size, heap_id_mask, flags);
847 return ERR_PTR(-EINVAL);
848 }
849 }
850
851 down_read(&dev->lock);
852 heap_id_mask = ion_parse_heap_id(heap_id_mask, flags);
853 if (heap_id_mask == 0) {
854 up_read(&dev->lock);
return ERR_PTR(-EINVAL);
}
855
856 plist_for_each_entry(heap, &dev->heaps, node) {
857 /* if the caller didn't specify this heap id */
858 if (!((1 << heap->id) & heap_id_mask))
859 continue;
860 buffer = ion_buffer_create(heap, dev, len, align, flags);
861 if (!IS_ERR(buffer))
862 break;
863 }
864 up_read(&dev->lock);
865
866 if (buffer == NULL) {
867 trace_ion_alloc_fail(client->name, ENODEV, len,
868 align, heap_id_mask, flags);
869 return ERR_PTR(-ENODEV);
870 }
871
872 if (IS_ERR(buffer)) {
873 trace_ion_alloc_fail(client->name, PTR_ERR(buffer),
874 len, align, heap_id_mask, flags);
875 return ERR_CAST(buffer);
876 }
877
878 handle = ion_handle_create(client, buffer);
879
880 /*
881 * ion_buffer_create will create a buffer with a ref_cnt of 1,
882 * and ion_handle_create will take a second reference, drop one here
883 */
884 ion_buffer_put(buffer);
885
886 if (IS_ERR(handle)) {
887 trace_ion_alloc_fail(client->name, (unsigned long) buffer,
888 len, align, heap_id_mask, flags);
889 return handle;
890 }
891
892 mutex_lock(&client->lock);
893 if (grab_handle)
894 ion_handle_get(handle);
895 ret = ion_handle_add(client, handle);
896 mutex_unlock(&client->lock);
897 if (ret) {
898 ion_handle_put(handle);
899 handle = ERR_PTR(ret);
900 trace_ion_alloc_fail(client->name, (unsigned long) buffer,
901 len, align, heap_id_mask, flags);
902 }
903
904 ION_EVENT_ALLOC(buffer, ION_EVENT_DONE());
905 trace_ion_alloc_end(client->name, (unsigned long) buffer,
906 len, align, heap_id_mask, flags);
907
908 return handle;
909 }
910
911 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
912 size_t align, unsigned int heap_id_mask,
913 unsigned int flags)
914 {
915 return __ion_alloc(client, len, align, heap_id_mask, flags, false);
916 }
917 EXPORT_SYMBOL(ion_alloc);
918
919 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
920 {
921 bool valid_handle;
922
923 BUG_ON(client != handle->client);
924
925 valid_handle = ion_handle_validate(client, handle);
926
927 if (!valid_handle) {
928 WARN(1, "%s: invalid handle passed to free.\n", __func__);
929 return;
930 }
931 ion_handle_put_nolock(handle);
932 }
933
934 static void user_ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
935 {
936 bool valid_handle;
937
938 BUG_ON(client != handle->client);
939
940 valid_handle = ion_handle_validate(client, handle);
941 if (!valid_handle) {
942 WARN(1, "%s: invalid handle passed to free.\n", __func__);
943 return;
944 }
945 if (!(handle->user_ref_count > 0)) {
946 WARN(1, "%s: User does not have access!\n", __func__);
947 return;
948 }
949 user_ion_handle_put_nolock(handle);
950 }
951
952 void ion_free(struct ion_client *client, struct ion_handle *handle)
953 {
954 BUG_ON(client != handle->client);
955
956 mutex_lock(&client->lock);
957 ion_free_nolock(client, handle);
958 mutex_unlock(&client->lock);
959 }
960 EXPORT_SYMBOL(ion_free);
961
962 int ion_phys(struct ion_client *client, struct ion_handle *handle,
963 ion_phys_addr_t *addr, size_t *len)
964 {
965 struct ion_buffer *buffer;
966 int ret;
967
968 mutex_lock(&client->lock);
969 if (!ion_handle_validate(client, handle)) {
970 mutex_unlock(&client->lock);
971 return -EINVAL;
972 }
973
974 buffer = handle->buffer;
975
976 if (!buffer->heap->ops->phys) {
977 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
978 __func__, buffer->heap->name, buffer->heap->type);
979 mutex_unlock(&client->lock);
980 return -ENODEV;
981 }
982 mutex_unlock(&client->lock);
983 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
984 return ret;
985 }
986 EXPORT_SYMBOL(ion_phys);
987
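/*
 * Kernel mappings are reference counted: the buffer is mapped once via
 * heap->ops->map_kernel() and unmapped again when the last kmap reference
 * (per buffer here, per handle below) is dropped.
 */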
988 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
989 {
990 void *vaddr;
991
992 if (buffer->kmap_cnt) {
993 buffer->kmap_cnt++;
994 return buffer->vaddr;
995 }
996 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
997 if (WARN_ONCE(vaddr == NULL,
998 "heap->ops->map_kernel should return ERR_PTR on error"))
999 return ERR_PTR(-EINVAL);
1000 if (IS_ERR(vaddr))
1001 return vaddr;
1002 buffer->vaddr = vaddr;
1003 buffer->kmap_cnt++;
1004
1005 return vaddr;
1006 }
1007
1008 static void *ion_handle_kmap_get(struct ion_handle *handle)
1009 {
1010 struct ion_buffer *buffer = handle->buffer;
1011 void *vaddr;
1012
1013 if (handle->kmap_cnt) {
1014 handle->kmap_cnt++;
1015 return buffer->vaddr;
1016 }
1017 vaddr = ion_buffer_kmap_get(buffer);
1018 if (IS_ERR(vaddr))
1019 return vaddr;
1020 handle->kmap_cnt++;
1021 return vaddr;
1022 }
1023
1024 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
1025 {
1026 buffer->kmap_cnt--;
1027 if (!buffer->kmap_cnt) {
1028 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
1029 buffer->vaddr = NULL;
1030 }
1031 }
1032
1033 static void ion_handle_kmap_put(struct ion_handle *handle)
1034 {
1035 struct ion_buffer *buffer = handle->buffer;
1036
1037 if (!handle->kmap_cnt) {
1038 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
1039 return;
1040 }
1041 handle->kmap_cnt--;
1042 if (!handle->kmap_cnt)
1043 ion_buffer_kmap_put(buffer);
1044 }
1045
1046 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
1047 {
1048 struct ion_buffer *buffer;
1049 void *vaddr;
1050
1051 mutex_lock(&client->lock);
1052 if (!ion_handle_validate(client, handle)) {
1053 pr_err("%s: invalid handle passed to map_kernel.\n",
1054 __func__);
1055 mutex_unlock(&client->lock);
1056 return ERR_PTR(-EINVAL);
1057 }
1058
1059 buffer = handle->buffer;
1060
1061 if (!handle->buffer->heap->ops->map_kernel) {
1062 pr_err("%s: map_kernel is not implemented by this heap.\n",
1063 __func__);
1064 mutex_unlock(&client->lock);
1065 return ERR_PTR(-ENODEV);
1066 }
1067
1068 mutex_lock(&buffer->lock);
1069 vaddr = ion_handle_kmap_get(handle);
1070 mutex_unlock(&buffer->lock);
1071 mutex_unlock(&client->lock);
1072 return vaddr;
1073 }
1074 EXPORT_SYMBOL(ion_map_kernel);
1075
1076 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
1077 {
1078 struct ion_buffer *buffer;
1079
1080 mutex_lock(&client->lock);
1081 buffer = handle->buffer;
1082 mutex_lock(&buffer->lock);
1083 ion_handle_kmap_put(handle);
1084 mutex_unlock(&buffer->lock);
1085 mutex_unlock(&client->lock);
1086 }
1087 EXPORT_SYMBOL(ion_unmap_kernel);
1088
1089 static int ion_debug_client_show(struct seq_file *s, void *unused)
1090 {
1091 struct ion_client *client = s->private;
1092 struct rb_node *n;
1093 size_t sizes[ION_NUM_HEAP_IDS] = {0};
1094 size_t sizes_pss[ION_NUM_HEAP_IDS] = {0};
1095 const char *names[ION_NUM_HEAP_IDS] = {NULL};
1096 int i;
1097
1098 down_read(&g_idev->lock);
1099
1100 /* check validity of the client */
1101 for (n = rb_first(&g_idev->clients); n; n = rb_next(n)) {
1102 struct ion_client *c = rb_entry(n, struct ion_client, node);
1103 if (client == c)
1104 break;
1105 }
1106
1107 if (IS_ERR_OR_NULL(n)) {
1108 pr_err("%s: invalid client %p\n", __func__, client);
1109 up_read(&g_idev->lock);
1110 return -EINVAL;
1111 }
1112
1113 seq_printf(s, "%16.s %4.s %16.s %4.s %10.s %8.s %9.s\n",
1114 "task", "pid", "thread", "tid", "size", "# procs", "flag");
1115 seq_printf(s, "----------------------------------------------"
1116 "--------------------------------------------\n");
1117
1118 mutex_lock(&client->lock);
1119 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1120 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1121 node);
1122 struct ion_buffer *buffer = handle->buffer;
1123 unsigned int id = buffer->heap->id;
1124
1125 if (!names[id])
1126 names[id] = buffer->heap->name;
1127 sizes[id] += buffer->size;
1128 sizes_pss[id] += (buffer->size / buffer->handle_count);
1129 seq_printf(s, "%16.s %4u %16.s %4u %10zu %8d %9lx\n",
1130 buffer->task_comm, buffer->pid,
1131 buffer->thread_comm, buffer->tid, buffer->size,
1132 buffer->handle_count, buffer->flags);
1133 }
1134 mutex_unlock(&client->lock);
1135 up_read(&g_idev->lock);
1136
1137 seq_printf(s, "----------------------------------------------"
1138 "--------------------------------------------\n");
1139 seq_printf(s, "%16.16s: %16.16s %18.18s\n", "heap_name",
1140 "size_in_bytes", "size_in_bytes(pss)");
1141 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
1142 if (!names[i])
1143 continue;
1144 seq_printf(s, "%16.16s: %16zu %18zu\n",
1145 names[i], sizes[i], sizes_pss[i]);
1146 }
1147 return 0;
1148 }
1149
1150 static int ion_debug_client_open(struct inode *inode, struct file *file)
1151 {
1152 return single_open(file, ion_debug_client_show, inode->i_private);
1153 }
1154
1155 static const struct file_operations debug_client_fops = {
1156 .open = ion_debug_client_open,
1157 .read = seq_read,
1158 .llseek = seq_lseek,
1159 .release = single_release,
1160 };
1161
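/*
 * Return the next unused serial for @name so that the debugfs entries of
 * clients sharing a name stay unique ("name-0", "name-1", ...).
 */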
1162 static int ion_get_client_serial(const struct rb_root *root,
1163 const unsigned char *name)
1164 {
1165 int serial = -1;
1166 struct rb_node *node;
1167
1168 for (node = rb_first(root); node; node = rb_next(node)) {
1169 struct ion_client *client = rb_entry(node, struct ion_client,
1170 node);
1171
1172 if (strcmp(client->name, name))
1173 continue;
1174 serial = max(serial, client->display_serial);
1175 }
1176 return serial + 1;
1177 }
1178
1179 struct ion_client *ion_client_create(struct ion_device *dev,
1180 const char *name)
1181 {
1182 struct ion_client *client;
1183 struct task_struct *task;
1184 struct rb_node **p;
1185 struct rb_node *parent = NULL;
1186 struct ion_client *entry;
1187 pid_t pid;
1188
1189 if (!name) {
1190 pr_err("%s: Name cannot be null\n", __func__);
1191 return ERR_PTR(-EINVAL);
1192 }
1193
1194 get_task_struct(current->group_leader);
1195 task_lock(current->group_leader);
1196 pid = task_pid_nr(current->group_leader);
1197 /*
1198 * don't bother to store task struct for kernel threads,
1199 * they can't be killed anyway
1200 */
1201 if (current->group_leader->flags & PF_KTHREAD) {
1202 put_task_struct(current->group_leader);
1203 task = NULL;
1204 } else {
1205 task = current->group_leader;
1206 }
1207 task_unlock(current->group_leader);
1208
1209 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1210 if (!client)
1211 goto err_put_task_struct;
1212
1213 client->dev = dev;
1214 client->handles = RB_ROOT;
1215 idr_init(&client->idr);
1216 mutex_init(&client->lock);
1217 client->task = task;
1218 client->pid = pid;
1219 client->name = kstrdup(name, GFP_KERNEL);
1220 if (!client->name)
1221 goto err_free_client;
1222
1223 down_write(&dev->lock);
1224 client->display_serial = ion_get_client_serial(&dev->clients, name);
1225 client->display_name = kasprintf(
1226 GFP_KERNEL, "%s-%d", name, client->display_serial);
1227 if (!client->display_name) {
1228 up_write(&dev->lock);
1229 goto err_free_client_name;
1230 }
1231 p = &dev->clients.rb_node;
1232 while (*p) {
1233 parent = *p;
1234 entry = rb_entry(parent, struct ion_client, node);
1235
1236 if (client < entry)
1237 p = &(*p)->rb_left;
1238 else if (client > entry)
1239 p = &(*p)->rb_right;
1240 }
1241 rb_link_node(&client->node, parent, p);
1242 rb_insert_color(&client->node, &dev->clients);
1243
1244 client->debug_root = debugfs_create_file(client->display_name, 0664,
1245 dev->clients_debug_root,
1246 client, &debug_client_fops);
1247 if (!client->debug_root) {
1248 char buf[256], *path;
1249
1250 path = dentry_path(dev->clients_debug_root, buf, 256);
1251 pr_err("Failed to create client debugfs at %s/%s\n",
1252 path, client->display_name);
1253 }
1254
1255 up_write(&dev->lock);
1256
1257 return client;
1258
1259 err_free_client_name:
1260 kfree(client->name);
1261 err_free_client:
1262 kfree(client);
1263 err_put_task_struct:
1264 if (task)
1265 put_task_struct(current->group_leader);
1266 return ERR_PTR(-ENOMEM);
1267 }
1268 EXPORT_SYMBOL(ion_client_create);
1269
1270 void ion_client_destroy(struct ion_client *client)
1271 {
1272 struct ion_device *dev = client->dev;
1273 struct rb_node *n;
1274
1275 pr_debug("%s: %d\n", __func__, __LINE__);
1276
1277 mutex_lock(&client->lock);
1278 while ((n = rb_first(&client->handles))) {
1279 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1280 node);
1281 ion_handle_destroy(&handle->ref);
1282 }
1283
1284 mutex_unlock(&client->lock);
1285 idr_destroy(&client->idr);
1286
1287 down_write(&dev->lock);
1288 if (client->task)
1289 put_task_struct(client->task);
1290 rb_erase(&client->node, &dev->clients);
1291 debugfs_remove_recursive(client->debug_root);
1292 up_write(&dev->lock);
1293
1294 kfree(client->display_name);
1295 kfree(client->name);
1296 kfree(client);
1297 }
1298 EXPORT_SYMBOL(ion_client_destroy);
1299
1300 struct sg_table *ion_sg_table(struct ion_client *client,
1301 struct ion_handle *handle)
1302 {
1303 struct ion_buffer *buffer;
1304 struct sg_table *table;
1305
1306 mutex_lock(&client->lock);
1307 if (!ion_handle_validate(client, handle)) {
1308 pr_err("%s: invalid handle passed to map_dma.\n",
1309 __func__);
1310 mutex_unlock(&client->lock);
1311 return ERR_PTR(-EINVAL);
1312 }
1313 buffer = handle->buffer;
1314 table = buffer->sg_table;
1315 mutex_unlock(&client->lock);
1316 return table;
1317 }
1318 EXPORT_SYMBOL(ion_sg_table);
1319
1320 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1321 struct device *dev,
1322 enum dma_data_direction direction);
1323
1324 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1325 enum dma_data_direction direction)
1326 {
1327 struct dma_buf *dmabuf = attachment->dmabuf;
1328 struct ion_buffer *buffer = dmabuf->priv;
1329
1330 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1331
1332 ion_buffer_task_add_lock(buffer, attachment->dev);
1333
1334 return buffer->sg_table;
1335 }
1336
1337 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1338 struct sg_table *table,
1339 enum dma_data_direction direction)
1340 {
1341 ion_buffer_task_remove_lock(attachment->dmabuf->priv, attachment->dev);
1342 }
1343
1344 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1345 size_t size, enum dma_data_direction dir)
1346 {
1347 struct scatterlist sg;
1348
1349 sg_init_table(&sg, 1);
1350 sg_set_page(&sg, page, size, 0);
1351 /*
1352 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1353 * for the targeted device, but this works on the currently targeted
1354 * hardware.
1355 */
1356 sg_dma_address(&sg) = page_to_phys(page);
1357 dma_sync_sg_for_device(dev, &sg, 1, dir);
1358 }
1359
1360 struct ion_vma_list {
1361 struct list_head list;
1362 struct vm_area_struct *vma;
1363 };
1364
1365 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1366 struct device *dev,
1367 enum dma_data_direction dir)
1368 {
1369 struct ion_vma_list *vma_list;
1370 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1371 int i;
1372
1373 if (!ion_buffer_cached(buffer))
1374 return;
1375
1376 pr_debug("%s: syncing for device %s\n", __func__,
1377 dev ? dev_name(dev) : "null");
1378
1379 if (!ion_buffer_fault_user_mappings(buffer))
1380 return;
1381
1382 mutex_lock(&buffer->lock);
1383 for (i = 0; i < pages; i++) {
1384 struct page *page = buffer->pages[i];
1385
1386 if (ion_buffer_page_is_dirty(page))
1387 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1388 PAGE_SIZE, dir);
1389
1390 ion_buffer_page_clean(buffer->pages + i);
1391 }
1392 list_for_each_entry(vma_list, &buffer->vmas, list) {
1393 struct vm_area_struct *vma = vma_list->vma;
1394
1395 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1396 NULL);
1397 }
1398 mutex_unlock(&buffer->lock);
1399 }
1400
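/*
 * Fault handler for buffers with faulted user mappings: mark the faulting
 * page dirty for later cache maintenance and insert its pfn into the vma.
 */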
1401 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1402 {
1403 struct ion_buffer *buffer = vma->vm_private_data;
1404 unsigned long pfn;
1405 int ret;
1406
1407 mutex_lock(&buffer->lock);
1408 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1409 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1410
1411 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1412 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1413 mutex_unlock(&buffer->lock);
1414 if (ret)
1415 return VM_FAULT_ERROR;
1416
1417 return VM_FAULT_NOPAGE;
1418 }
1419
1420 static void ion_vm_open(struct vm_area_struct *vma)
1421 {
1422 struct ion_buffer *buffer = vma->vm_private_data;
1423 struct ion_vma_list *vma_list;
1424
1425 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1426 if (!vma_list)
1427 return;
1428 vma_list->vma = vma;
1429 mutex_lock(&buffer->lock);
1430 list_add(&vma_list->list, &buffer->vmas);
1431 mutex_unlock(&buffer->lock);
1432 pr_debug("%s: adding %pK\n", __func__, vma);
1433 }
1434
1435 static void ion_vm_close(struct vm_area_struct *vma)
1436 {
1437 struct ion_buffer *buffer = vma->vm_private_data;
1438 struct ion_vma_list *vma_list, *tmp;
1439
1440 pr_debug("%s\n", __func__);
1441 mutex_lock(&buffer->lock);
1442 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1443 if (vma_list->vma != vma)
1444 continue;
1445 list_del(&vma_list->list);
1446 kfree(vma_list);
1447 pr_debug("%s: deleting %pK\n", __func__, vma);
1448 break;
1449 }
1450 mutex_unlock(&buffer->lock);
1451 }
1452
1453 static const struct vm_operations_struct ion_vma_ops = {
1454 .open = ion_vm_open,
1455 .close = ion_vm_close,
1456 .fault = ion_vm_fault,
1457 };
1458
1459 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1460 {
1461 struct ion_buffer *buffer = dmabuf->priv;
1462 int ret = 0;
1463
1464 ION_EVENT_BEGIN();
1465
1466 if (buffer->flags & ION_FLAG_NOZEROED) {
1467 pr_err("%s: mmap non-zeroed buffer to user is prohibited!\n",
1468 __func__);
1469 return -EINVAL;
1470 }
1471
1472 if (buffer->flags & ION_FLAG_PROTECTED) {
1473 pr_err("%s: mmap protected buffer to user is prohibited!\n",
1474 __func__);
1475 return -EPERM;
1476 }
1477
1478 if ((((vma->vm_pgoff << PAGE_SHIFT) >= buffer->size)) ||
1479 ((vma->vm_end - vma->vm_start) >
1480 (buffer->size - (vma->vm_pgoff << PAGE_SHIFT)))) {
1481 pr_err("%s: trying to map outside of buffer.\n", __func__);
1482 return -EINVAL;
1483 }
1484
1485 if (!buffer->heap->ops->map_user) {
1486 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1487 __func__);
1488 return -EINVAL;
1489 }
1490
1491 trace_ion_mmap_start((unsigned long) buffer, buffer->size,
1492 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
1493
1494 if (ion_buffer_fault_user_mappings(buffer)) {
1495 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1496 VM_DONTDUMP;
1497 vma->vm_private_data = buffer;
1498 vma->vm_ops = &ion_vma_ops;
1499 ion_vm_open(vma);
1500 ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
1501 trace_ion_mmap_end((unsigned long) buffer, buffer->size,
1502 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
1503 return 0;
1504 }
1505
1506 if (!(buffer->flags & ION_FLAG_CACHED))
1507 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1508
1509 mutex_lock(&buffer->lock);
1510 /* now map it to userspace */
1511 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1512 mutex_unlock(&buffer->lock);
1513
1514 if (ret)
1515 pr_err("%s: failure mapping buffer to userspace\n",
1516 __func__);
1517
1518 ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
1519 trace_ion_mmap_end((unsigned long) buffer, buffer->size,
1520 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
1521
1522 return ret;
1523 }
1524
1525 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1526 {
1527 struct ion_buffer *buffer = dmabuf->priv;
1528
1529 ion_buffer_put(buffer);
1530 }
1531
1532 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
1533 {
1534 struct ion_buffer *buffer = dmabuf->priv;
1535 void *vaddr;
1536
1537 if (!buffer->heap->ops->map_kernel) {
1538 pr_err("%s: map kernel is not implemented by this heap.\n",
1539 __func__);
1540 return ERR_PTR(-ENODEV);
1541 }
1542
1543 mutex_lock(&buffer->lock);
1544 vaddr = ion_buffer_kmap_get(buffer);
1545 mutex_unlock(&buffer->lock);
1546
1547 return vaddr;
1548 }
1549
1550 static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *ptr)
1551 {
1552 struct ion_buffer *buffer = dmabuf->priv;
1553
1554 mutex_lock(&buffer->lock);
1555 ion_buffer_kmap_put(buffer);
1556 mutex_unlock(&buffer->lock);
1557 }
1558
1559 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1560 {
1561 struct ion_buffer *buffer = dmabuf->priv;
1562
1563 return buffer->vaddr + offset * PAGE_SIZE;
1564 }
1565
1566 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1567 void *ptr)
1568 {
1569 }
1570
1571 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1572 size_t len,
1573 enum dma_data_direction direction)
1574 {
1575 struct ion_buffer *buffer = dmabuf->priv;
1576 void *vaddr;
1577
1578 if (!buffer->heap->ops->map_kernel) {
1579 pr_err("%s: map kernel is not implemented by this heap.\n",
1580 __func__);
1581 return -ENODEV;
1582 }
1583
1584 mutex_lock(&buffer->lock);
1585 vaddr = ion_buffer_kmap_get(buffer);
1586 mutex_unlock(&buffer->lock);
1587 return PTR_ERR_OR_ZERO(vaddr);
1588 }
1589
1590 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1591 size_t len,
1592 enum dma_data_direction direction)
1593 {
1594 struct ion_buffer *buffer = dmabuf->priv;
1595
1596 mutex_lock(&buffer->lock);
1597 ion_buffer_kmap_put(buffer);
1598 mutex_unlock(&buffer->lock);
1599 }
1600
1601 static void ion_dma_buf_set_privflag(struct dma_buf *dmabuf)
1602 {
1603 struct ion_buffer *buffer = dmabuf->priv;
1604
1605 mutex_lock(&buffer->lock);
1606 buffer->private_flags |= ION_PRIV_FLAG_NEED_TO_FLUSH;
1607 mutex_unlock(&buffer->lock);
1608 }
1609
1610 static bool ion_dma_buf_get_privflag(struct dma_buf *dmabuf, bool clear)
1611 {
1612 struct ion_buffer *buffer = dmabuf->priv;
1613 bool ret;
1614
1615 mutex_lock(&buffer->lock);
1616 ret = !!(buffer->private_flags & ION_PRIV_FLAG_NEED_TO_FLUSH);
1617 if (clear)
1618 buffer->private_flags &= ~ION_PRIV_FLAG_NEED_TO_FLUSH;
1619 mutex_unlock(&buffer->lock);
1620
1621 return ret;
1622 }
1623
1624 static struct dma_buf_ops dma_buf_ops = {
1625 .map_dma_buf = ion_map_dma_buf,
1626 .unmap_dma_buf = ion_unmap_dma_buf,
1627 .mmap = ion_mmap,
1628 .release = ion_dma_buf_release,
1629 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1630 .end_cpu_access = ion_dma_buf_end_cpu_access,
1631 .kmap_atomic = ion_dma_buf_kmap,
1632 .kunmap_atomic = ion_dma_buf_kunmap,
1633 .kmap = ion_dma_buf_kmap,
1634 .kunmap = ion_dma_buf_kunmap,
1635 .vmap = ion_dma_buf_vmap,
1636 .vunmap = ion_dma_buf_vunmap,
1637 .set_privflag = ion_dma_buf_set_privflag,
1638 .get_privflag = ion_dma_buf_get_privflag,
1639 };
1640
1641 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1642 struct ion_handle *handle)
1643 {
1644 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1645 struct ion_buffer *buffer;
1646 struct dma_buf *dmabuf;
1647 bool valid_handle;
1648
1649 mutex_lock(&client->lock);
1650 valid_handle = ion_handle_validate(client, handle);
1651 if (!valid_handle) {
1652 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1653 mutex_unlock(&client->lock);
1654 return ERR_PTR(-EINVAL);
1655 }
1656 buffer = handle->buffer;
1657 ion_buffer_get(buffer);
1658 mutex_unlock(&client->lock);
1659
1660 exp_info.ops = &dma_buf_ops;
1661 exp_info.size = buffer->size;
1662 exp_info.flags = O_RDWR;
1663 exp_info.priv = buffer;
1664
1665 dmabuf = dma_buf_export(&exp_info);
1666 if (IS_ERR(dmabuf)) {
1667 ion_buffer_put(buffer);
1668 return dmabuf;
1669 }
1670
1671 return dmabuf;
1672 }
1673 EXPORT_SYMBOL(ion_share_dma_buf);
1674
1675 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1676 {
1677 struct dma_buf *dmabuf;
1678 int fd;
1679
1680 dmabuf = ion_share_dma_buf(client, handle);
1681 if (IS_ERR(dmabuf))
1682 return PTR_ERR(dmabuf);
1683
1684 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1685 if (fd < 0)
1686 dma_buf_put(dmabuf);
1687
1688 return fd;
1689 }
1690 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1691
1692 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1693 {
1694 struct dma_buf *dmabuf;
1695 struct ion_buffer *buffer;
1696 struct ion_handle *handle;
1697 int ret;
1698
1699 dmabuf = dma_buf_get(fd);
1700 if (IS_ERR(dmabuf))
1701 return ERR_CAST(dmabuf);
1702 /* if this memory came from ion */
1703
1704 if (dmabuf->ops != &dma_buf_ops) {
1705 pr_err("%s: can not import dmabuf from another exporter\n",
1706 __func__);
1707 dma_buf_put(dmabuf);
1708 return ERR_PTR(-EINVAL);
1709 }
1710 buffer = dmabuf->priv;
1711
1712 mutex_lock(&client->lock);
1713 /* if a handle exists for this buffer just take a reference to it */
1714 handle = ion_handle_lookup(client, buffer);
1715 if (!IS_ERR(handle)) {
1716 handle = ion_handle_get_check_overflow(handle);
1717 mutex_unlock(&client->lock);
1718 goto end;
1719 }
1720
1721 handle = ion_handle_create(client, buffer);
1722 if (IS_ERR(handle)) {
1723 mutex_unlock(&client->lock);
1724 goto end;
1725 }
1726
1727 ret = ion_handle_add(client, handle);
1728 mutex_unlock(&client->lock);
1729 if (ret) {
1730 ion_handle_put(handle);
1731 handle = ERR_PTR(ret);
1732 }
1733
1734 end:
1735 dma_buf_put(dmabuf);
1736 return handle;
1737 }
1738 EXPORT_SYMBOL(ion_import_dma_buf);
1739
1740 int ion_cached_needsync_dmabuf(struct dma_buf *dmabuf)
1741 {
1742 struct ion_buffer *buffer = dmabuf->priv;
1743 unsigned long cacheflag = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
1744
1745 if (dmabuf->ops != &dma_buf_ops)
1746 return -EINVAL;
1747
1748 return ((buffer->flags & cacheflag) == cacheflag) ? 1 : 0;
1749 }
1750 EXPORT_SYMBOL(ion_cached_needsync_dmabuf);
1751
1752 bool ion_may_hwrender_dmabuf(struct dma_buf *dmabuf)
1753 {
1754 struct ion_buffer *buffer = dmabuf->priv;
1755
1756 if (dmabuf->ops != &dma_buf_ops) {
1757 WARN(1, "%s: given dmabuf is not exported by ION\n", __func__);
1758 return false;
1759 }
1760
1761 return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
1762 }
1763 EXPORT_SYMBOL(ion_may_hwrender_dmabuf);
1764
1765 bool ion_may_hwrender_handle(struct ion_client *client, struct ion_handle *handle)
1766 {
1767 struct ion_buffer *buffer = handle->buffer;
1768 bool valid_handle;
1769
1770 mutex_lock(&client->lock);
1771 valid_handle = ion_handle_validate(client, handle);
1772
1773 if (!valid_handle) {
1774 WARN(1, "%s: invalid handle passed\n", __func__);
1775 mutex_unlock(&client->lock);
1776 return false;
1777 }
1778 mutex_unlock(&client->lock);
1779
1780 return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
1781 }
1782 EXPORT_SYMBOL(ion_may_hwrender_handle);
1783
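/*
 * Flush the CPU caches for the whole buffer behind a dma-buf fd so that a
 * device sees the latest CPU writes. Uncached buffers and buffers with
 * faulted user mappings are skipped here.
 */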
1784 static int ion_sync_for_device(struct ion_client *client, int fd)
1785 {
1786 struct dma_buf *dmabuf;
1787 struct ion_buffer *buffer;
1788 struct scatterlist *sg, *sgl;
1789 int nelems;
1790 void *vaddr;
1791 int i = 0;
1792
1793 dmabuf = dma_buf_get(fd);
1794 if (IS_ERR(dmabuf))
1795 return PTR_ERR(dmabuf);
1796
1797 /* if this memory came from ion */
1798 if (dmabuf->ops != &dma_buf_ops) {
1799 pr_err("%s: can not sync dmabuf from another exporter\n",
1800 __func__);
1801 dma_buf_put(dmabuf);
1802 return -EINVAL;
1803 }
1804 buffer = dmabuf->priv;
1805
1806 if (!ion_buffer_cached(buffer) ||
1807 ion_buffer_fault_user_mappings(buffer)) {
1808 dma_buf_put(dmabuf);
1809 return 0;
1810 }
1811
1812 trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
1813 DMA_BIDIRECTIONAL, buffer->size,
1814 buffer->vaddr, 0, false);
1815
1816 sgl = buffer->sg_table->sgl;
1817 nelems = buffer->sg_table->nents;
1818
1819 for_each_sg(sgl, sg, nelems, i) {
1820 vaddr = phys_to_virt(sg_phys(sg));
1821 __dma_flush_range(vaddr, vaddr + sg->length);
1822 }
1823
1824 trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
1825 DMA_BIDIRECTIONAL, buffer->size,
1826 buffer->vaddr, 0, false);
1827
1828 dma_buf_put(dmabuf);
1829 return 0;
1830 }
1831
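/*
 * Like ion_sync_for_device(), but cleans only the [offset, offset + len)
 * range of the buffer, walking the scatterlist until the range is covered.
 */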
1832 static int ion_sync_partial_for_device(struct ion_client *client, int fd,
1833 off_t offset, size_t len)
1834 {
1835 struct dma_buf *dmabuf;
1836 struct ion_buffer *buffer;
1837 struct scatterlist *sg, *sgl;
1838 size_t remained = len;
1839 int nelems;
1840 int i;
1841
1842 dmabuf = dma_buf_get(fd);
1843 if (IS_ERR(dmabuf))
1844 return PTR_ERR(dmabuf);
1845
1846 /* if this memory came from ion */
1847 if (dmabuf->ops != &dma_buf_ops) {
1848 pr_err("%s: can not sync dmabuf from another exporter\n",
1849 __func__);
1850 dma_buf_put(dmabuf);
1851 return -EINVAL;
1852 }
1853 buffer = dmabuf->priv;
1854
1855 if (!ion_buffer_cached(buffer) ||
1856 ion_buffer_fault_user_mappings(buffer)) {
1857 dma_buf_put(dmabuf);
1858 return 0;
1859 }
1860
1861 trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
1862 DMA_BIDIRECTIONAL, buffer->size,
1863 buffer->vaddr, 0, false);
1864
1865 sgl = buffer->sg_table->sgl;
1866 nelems = buffer->sg_table->nents;
1867
1868 for_each_sg(sgl, sg, nelems, i) {
1869 size_t len_to_flush;
1870 if (offset >= sg->length) {
1871 offset -= sg->length;
1872 continue;
1873 }
1874
1875 len_to_flush = sg->length - offset;
1876 if (remained < len_to_flush) {
1877 len_to_flush = remained;
1878 remained = 0;
1879 } else {
1880 remained -= len_to_flush;
1881 }
1882
1883 __dma_map_area(phys_to_virt(sg_phys(sg)) + offset,
1884 len_to_flush, DMA_TO_DEVICE);
1885
1886 if (remained == 0)
1887 break;
1888 offset = 0;
1889 }
1890
1891 trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
1892 DMA_BIDIRECTIONAL, buffer->size,
1893 buffer->vaddr, 0, false);
1894
1895 dma_buf_put(dmabuf);
1896
1897 return 0;
1898 }
1899
1900 /* fix up the cases where the ioctl direction bits are incorrect */
1901 static unsigned int ion_ioctl_dir(unsigned int cmd)
1902 {
1903 switch (cmd) {
1904 case ION_IOC_SYNC:
1905 case ION_IOC_SYNC_PARTIAL:
1906 case ION_IOC_FREE:
1907 case ION_IOC_CUSTOM:
1908 return _IOC_WRITE;
1909 default:
1910 return _IOC_DIR(cmd);
1911 }
1912 }
1913
1914 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1915 {
1916 struct ion_client *client = filp->private_data;
1917 struct ion_device *dev = client->dev;
1918 struct ion_handle *cleanup_handle = NULL;
1919 int ret = 0;
1920 unsigned int dir;
1921
1922 union {
1923 struct ion_fd_data fd;
1924 struct ion_fd_partial_data fd_partial;
1925 struct ion_allocation_data allocation;
1926 struct ion_handle_data handle;
1927 struct ion_custom_data custom;
1928 } data;
1929
1930 dir = ion_ioctl_dir(cmd);
1931
1932 if (_IOC_SIZE(cmd) > sizeof(data))
1933 return -EINVAL;
1934
1935 if (dir & _IOC_WRITE)
1936 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1937 return -EFAULT;
1938
1939 switch (cmd) {
1940 case ION_IOC_ALLOC:
1941 {
1942 struct ion_handle *handle;
1943
1944 handle = __ion_alloc(client, data.allocation.len,
1945 data.allocation.align,
1946 data.allocation.heap_id_mask,
1947 data.allocation.flags, true);
1948 if (IS_ERR(handle)) {
1949 pr_err("%s: len %zu align %zu heap_id_mask %u flags %x (ret %ld)\n",
1950 __func__, data.allocation.len,
1951 data.allocation.align,
1952 data.allocation.heap_id_mask,
1953 data.allocation.flags, PTR_ERR(handle));
1954 return PTR_ERR(handle);
1955 }
1956 pass_to_user(handle);
1957 data.allocation.handle = handle->id;
1958
1959 cleanup_handle = handle;
1960 break;
1961 }
1962 case ION_IOC_FREE:
1963 {
1964 struct ion_handle *handle;
1965
1966 mutex_lock(&client->lock);
1967 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1968 if (IS_ERR(handle)) {
1969 mutex_unlock(&client->lock);
1970 return PTR_ERR(handle);
1971 }
1972 user_ion_free_nolock(client, handle);
1973 ion_handle_put_nolock(handle);
1974 mutex_unlock(&client->lock);
1975 break;
1976 }
1977 case ION_IOC_SHARE:
1978 case ION_IOC_MAP:
1979 {
1980 struct ion_handle *handle;
1981
1982 handle = ion_handle_get_by_id(client, data.handle.handle);
1983 if (IS_ERR(handle))
1984 return PTR_ERR(handle);
1985 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1986 ion_handle_put(handle);
1987 if (data.fd.fd < 0)
1988 ret = data.fd.fd;
1989 break;
1990 }
1991 case ION_IOC_IMPORT:
1992 {
1993 struct ion_handle *handle;
1994
1995 handle = ion_import_dma_buf(client, data.fd.fd);
1996 if (IS_ERR(handle)) {
1997 ret = PTR_ERR(handle);
1998 } else {
1999 handle = pass_to_user(handle);
2000 if (IS_ERR(handle))
2001 ret = PTR_ERR(handle);
2002 else
2003 data.handle.handle = handle->id;
2004 }
2005 break;
2006 }
2007 case ION_IOC_SYNC:
2008 {
2009 ret = ion_sync_for_device(client, data.fd.fd);
2010 break;
2011 }
2012 case ION_IOC_SYNC_PARTIAL:
2013 {
2014 ret = ion_sync_partial_for_device(client, data.fd_partial.fd,
2015 data.fd_partial.offset, data.fd_partial.len);
2016 break;
2017 }
2018 case ION_IOC_CUSTOM:
2019 {
2020 if (!dev->custom_ioctl)
2021 return -ENOTTY;
2022 ret = dev->custom_ioctl(client, data.custom.cmd,
2023 data.custom.arg);
2024 break;
2025 }
2026 default:
2027 return -ENOTTY;
2028 }
2029
2030 if (dir & _IOC_READ) {
2031 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
2032 if (cleanup_handle) {
2033 mutex_lock(&client->lock);
2034 user_ion_free_nolock(client, cleanup_handle);
2035 ion_handle_put_nolock(cleanup_handle);
2036 mutex_unlock(&client->lock);
2037 }
2038 return -EFAULT;
2039 }
2040 }
2041 if (cleanup_handle)
2042 ion_handle_put(cleanup_handle);
2043 return ret;
2044 }
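/*
 * Illustrative only: a minimal userspace sketch of the ioctl flow handled
 * above (allocate, export as a dma-buf fd, map it, then drop the handle).
 * The size, heap mask and flags are assumptions, not requirements.
 *
 *     int ion_fd = open("/dev/ion", O_RDWR);
 *
 *     struct ion_allocation_data alloc = {
 *         .len          = 1 << 20,
 *         .align        = 0,
 *         .heap_id_mask = 1 << ION_HEAP_TYPE_SYSTEM,
 *         .flags        = ION_FLAG_CACHED,
 *     };
 *     ioctl(ion_fd, ION_IOC_ALLOC, &alloc);       // fills alloc.handle
 *
 *     struct ion_fd_data share = { .handle = alloc.handle };
 *     ioctl(ion_fd, ION_IOC_SHARE, &share);       // fills share.fd
 *
 *     void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, share.fd, 0);
 *
 *     struct ion_handle_data hd = { .handle = alloc.handle };
 *     ioctl(ion_fd, ION_IOC_FREE, &hd);
 *     // the dma-buf fd (and the mapping) keeps the buffer alive
 */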
2045
2046 static int ion_release(struct inode *inode, struct file *file)
2047 {
2048 struct ion_client *client = file->private_data;
2049
2050 pr_debug("%s: %d\n", __func__, __LINE__);
2051 ion_client_destroy(client);
2052 return 0;
2053 }
2054
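/*
 * Each open of /dev/ion creates a separate ion_client named after the
 * opener's thread-group pid; ion_release() above tears the client (and any
 * handles it still holds) down again when the file is closed.
 */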
2055 static int ion_open(struct inode *inode, struct file *file)
2056 {
2057 struct miscdevice *miscdev = file->private_data;
2058 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
2059 struct ion_client *client;
2060 char debug_name[64];
2061
2062 pr_debug("%s: %d\n", __func__, __LINE__);
2063 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
2064 client = ion_client_create(dev, debug_name);
2065 if (IS_ERR(client))
2066 return PTR_ERR(client);
2067 file->private_data = client;
2068
2069 return 0;
2070 }
2071
2072 static const struct file_operations ion_fops = {
2073 .owner = THIS_MODULE,
2074 .open = ion_open,
2075 .release = ion_release,
2076 .unlocked_ioctl = ion_ioctl,
2077 .compat_ioctl = compat_ion_ioctl,
2078 };
2079
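/*
 * Sum the sizes of all buffers on heap @id that @client currently holds a
 * handle to; used by the per-heap debugfs file below.
 */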
2080 static size_t ion_debug_heap_total(struct ion_client *client,
2081 unsigned int id)
2082 {
2083 size_t size = 0;
2084 struct rb_node *n;
2085
2086 mutex_lock(&client->lock);
2087 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
2088 struct ion_handle *handle = rb_entry(n,
2089 struct ion_handle,
2090 node);
2091 if (handle->buffer->heap->id == id)
2092 size += handle->buffer->size;
2093 }
2094 mutex_unlock(&client->lock);
2095 return size;
2096 }
2097
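/*
 * Backs the per-heap debugfs file created in ion_device_add_heap() (with
 * debugfs mounted at /sys/kernel/debug this is
 * /sys/kernel/debug/ion/heaps/<heap name>): it lists per-client usage of
 * this heap, then orphaned buffers (handle_count == 0) with the last known
 * owner, then heap-wide totals and, for deferred-free heaps, the size of
 * the free list.
 */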
2098 static int ion_debug_heap_show(struct seq_file *s, void *unused)
2099 {
2100 struct ion_heap *heap = s->private;
2101 struct ion_device *dev = heap->dev;
2102 struct rb_node *n;
2103 size_t total_size = 0;
2104 size_t total_orphaned_size = 0;
2105
2106 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
2107 seq_puts(s, "----------------------------------------------------\n");
2108
2109 down_read(&dev->lock);
2110
2111 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
2112 struct ion_client *client = rb_entry(n, struct ion_client,
2113 node);
2114 size_t size = ion_debug_heap_total(client, heap->id);
2115
2116 if (!size)
2117 continue;
2118 if (client->task) {
2119 char task_comm[TASK_COMM_LEN];
2120
2121 get_task_comm(task_comm, client->task);
2122 seq_printf(s, "%16s %16u %16zu\n", task_comm,
2123 client->pid, size);
2124 } else {
2125 seq_printf(s, "%16s %16u %16zu\n", client->name,
2126 client->pid, size);
2127 }
2128 }
2129 seq_puts(s, "----------------------------------------------------\n");
2130 seq_puts(s, "orphaned allocations (info is from last known client):\n");
2131 mutex_lock(&dev->buffer_lock);
2132 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2133 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
2134 node);
2135 if (buffer->heap->id != heap->id)
2136 continue;
2137 total_size += buffer->size;
2138 if (!buffer->handle_count) {
2139 seq_printf(s, "%16s %16u %16zu %d %d\n",
2140 buffer->task_comm, buffer->pid,
2141 buffer->size, buffer->kmap_cnt,
2142 atomic_read(&buffer->ref.refcount));
2143 total_orphaned_size += buffer->size;
2144 }
2145 }
2146 mutex_unlock(&dev->buffer_lock);
2147 seq_puts(s, "----------------------------------------------------\n");
2148 seq_printf(s, "%16s %16zu\n", "total orphaned",
2149 total_orphaned_size);
2150 seq_printf(s, "%16s %16zu\n", "total ", total_size);
2151 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
2152 seq_printf(s, "%16s %16zu\n", "deferred free",
2153 heap->free_list_size);
2154 seq_puts(s, "----------------------------------------------------\n");
2155
2156 if (heap->debug_show)
2157 heap->debug_show(heap, s, unused);
2158
2159 up_read(&dev->lock);
2160
2161 return 0;
2162 }
2163
2164 static int ion_debug_heap_open(struct inode *inode, struct file *file)
2165 {
2166 return single_open(file, ion_debug_heap_show, inode->i_private);
2167 }
2168
2169 static const struct file_operations debug_heap_fops = {
2170 .open = ion_debug_heap_open,
2171 .read = seq_read,
2172 .llseek = seq_lseek,
2173 .release = single_release,
2174 };
2175
2176 static int debug_shrink_set(void *data, u64 val)
2177 {
2178 struct ion_heap *heap = data;
2179 struct shrink_control sc;
2180 int objs;
2181
2182 sc.gfp_mask = -1;
2183 sc.nr_to_scan = val;
2184
2185 if (!val) {
2186 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
2187 sc.nr_to_scan = objs;
2188 }
2189
2190 heap->shrinker.scan_objects(&heap->shrinker, &sc);
2191 return 0;
2192 }
2193
2194 static int debug_shrink_get(void *data, u64 *val)
2195 {
2196 struct ion_heap *heap = data;
2197 struct shrink_control sc;
2198 int objs;
2199
2200 sc.gfp_mask = -1;
2201 sc.nr_to_scan = 0;
2202
2203 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
2204 *val = objs;
2205 return 0;
2206 }
2207
2208 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
2209 debug_shrink_set, "%llu\n");
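/*
 * Exposed as "<heap name>_shrink" in the heaps debugfs directory (see
 * ion_device_add_heap() below): reading returns the number of objects the
 * heap's shrinker reports as freeable, writing N scans up to N objects and
 * writing 0 drains everything currently reported.  For example (heap name
 * and debugfs mount point assumed):
 *
 *     cat /sys/kernel/debug/ion/heaps/ion_system_heap_shrink
 *     echo 0 > /sys/kernel/debug/ion/heaps/ion_system_heap_shrink
 */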
2210
2211 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
2212 {
2213 struct dentry *debug_file;
2214
2215 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
2216 !heap->ops->unmap_dma)
2217 pr_err("%s: cannot add heap with invalid ops struct.\n",
2218 __func__);
2219
2220 spin_lock_init(&heap->free_lock);
2221 heap->free_list_size = 0;
2222
2223 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
2224 ion_heap_init_deferred_free(heap);
2225
2226 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
2227 ion_heap_init_shrinker(heap);
2228
2229 heap->dev = dev;
2230 down_write(&dev->lock);
2231 /*
2232 * use negative heap->id to reverse the priority: when traversing
2233 * the list later, heaps with higher id numbers are attempted first
2234 */
2235 plist_node_init(&heap->node, -heap->id);
2236 plist_add(&heap->node, &dev->heaps);
2237 debug_file = debugfs_create_file(heap->name, 0664,
2238 dev->heaps_debug_root, heap,
2239 &debug_heap_fops);
2240
2241 if (!debug_file) {
2242 char buf[256], *path;
2243
2244 path = dentry_path(dev->heaps_debug_root, buf, 256);
2245 pr_err("Failed to create heap debugfs at %s/%s\n",
2246 path, heap->name);
2247 }
2248
2249 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
2250 char debug_name[64];
2251
2252 snprintf(debug_name, 64, "%s_shrink", heap->name);
2253 debug_file = debugfs_create_file(
2254 debug_name, 0644, dev->heaps_debug_root, heap,
2255 &debug_shrink_fops);
2256 if (!debug_file) {
2257 char buf[256], *path;
2258
2259 path = dentry_path(dev->heaps_debug_root, buf, 256);
2260 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
2261 path, debug_name);
2262 }
2263 }
2264
2265 up_write(&dev->lock);
2266 }
2267 EXPORT_SYMBOL(ion_device_add_heap);
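/*
 * Illustrative only: the usual registration sequence from a platform
 * driver's probe, assuming heap_data[] describes the platform heaps and
 * that ion_heap_create() from the heap code is used to instantiate them:
 *
 *     struct ion_device *idev = ion_device_create(NULL);
 *     struct ion_heap *heap = ion_heap_create(&heap_data[i]);
 *
 *     ion_device_add_heap(idev, heap);
 */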
2268
2269 #ifdef CONFIG_ION_EXYNOS_STAT_LOG
2270
2271 #define MAX_DUMP_TASKS 8
2272 #define MAX_DUMP_NAME_LEN 32
2273 #define MAX_DUMP_BUFF_LEN 512
2274
2275 static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
2276 {
2277 if ((flags & ION_FLAG_CACHED) && !(flags & ION_FLAG_CACHED_NEEDS_SYNC))
2278 seq_printf(s, "cached|faultmap");
2279 else if (flags & ION_FLAG_CACHED)
2280 seq_printf(s, "cached|needsync");
2281 else
2282 seq_printf(s, "noncached");
2283
2284 if (flags & ION_FLAG_NOZEROED)
2285 seq_printf(s, "|nozeroed");
2286
2287 if (flags & ION_FLAG_PROTECTED)
2288 seq_printf(s, "|protected");
2289 }
2290
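/*
 * Build a '|'-separated list (at most MAX_DUMP_TASKS entries, truncated to
 * MAX_DUMP_BUFF_LEN bytes) of the master device names recorded in
 * @buffer->master_list, for the buffer debugfs dump below.
 */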
2291 static void ion_buffer_dump_tasks(struct ion_buffer *buffer, char *str)
2292 {
2293 struct ion_task *task, *tmp;
2294 const char *delim = "|";
2295 size_t total_len = 0;
2296 int count = 0;
2297
2298 list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
2299 const char *name;
2300 size_t len = strlen(dev_name(task->master));
2301
2302 if (len > MAX_DUMP_NAME_LEN)
2303 len = MAX_DUMP_NAME_LEN;
2304 if (!strncmp(dev_name(task->master), "ion", len))
2305 continue;
2306
2307 name = dev_name(task->master) + 9;
2308 len -= 9;
2309
2310 if (total_len + len + 1 > MAX_DUMP_BUFF_LEN)
2311 break;
2312
2313 strncat((char *)(str + total_len), name, len);
2314 total_len += len;
2315 if (!list_is_last(&task->list, &buffer->master_list))
2316 str[total_len++] = *delim;
2317
2318 if (++count >= MAX_DUMP_TASKS)
2319 break;
2320 }
2321 }
2322
2323 static int ion_debug_buffer_show(struct seq_file *s, void *unused)
2324 {
2325 struct ion_device *dev = s->private;
2326 struct rb_node *n;
2327 char *master_name;
2328 size_t total_size = 0;
2329
2330 master_name = kzalloc(MAX_DUMP_BUFF_LEN, GFP_KERNEL);
2331 if (!master_name) {
2332 pr_err("%s: no memory for client string buffer\n", __func__);
2333 return -ENOMEM;
2334 }
2335
2336 seq_printf(s, "%20s %16s %4s %16s %4s %10s %4s %3s %6s "
2337 "%24s %9s\n",
2338 "heap", "task", "pid", "thread", "tid",
2339 "size", "kmap", "ref", "handle",
2340 "master", "flag");
2341 seq_printf(s, "------------------------------------------"
2342 "----------------------------------------"
2343 "----------------------------------------"
2344 "--------------------------------------\n");
2345
2346 mutex_lock(&dev->buffer_lock);
2347 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2348 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
2349 node);
2350 mutex_lock(&buffer->lock);
2351 ion_buffer_dump_tasks(buffer, master_name);
2352 total_size += buffer->size;
2353 seq_printf(s, "%20s %16s %4u %16s %4u %10zu %4d %3d %6d "
2354 "%24s %9lx", buffer->heap->name,
2355 buffer->task_comm, buffer->pid,
2356 buffer->thread_comm,
2357 buffer->tid, buffer->size, buffer->kmap_cnt,
2358 atomic_read(&buffer->ref.refcount),
2359 buffer->handle_count, master_name,
2360 buffer->flags);
2361 seq_printf(s, "(");
2362 ion_buffer_dump_flags(s, buffer->flags);
2363 seq_printf(s, ")\n");
2364 mutex_unlock(&buffer->lock);
2365
2366 memset(master_name, 0, MAX_DUMP_BUFF_LEN);
2367 }
2368 mutex_unlock(&dev->buffer_lock);
2369
2370 seq_printf(s, "------------------------------------------"
2371 "----------------------------------------"
2372 "----------------------------------------"
2373 "--------------------------------------\n");
2374 seq_printf(s, "%16s %16zu\n", "total ", total_size);
2375 seq_printf(s, "------------------------------------------"
2376 "----------------------------------------"
2377 "----------------------------------------"
2378 "--------------------------------------\n");
2379
2380 kfree(master_name);
2381
2382 return 0;
2383 }
2384
2385 static int ion_debug_buffer_open(struct inode *inode, struct file *file)
2386 {
2387 return single_open(file, ion_debug_buffer_show, inode->i_private);
2388 }
2389
2390 static const struct file_operations debug_buffer_fops = {
2391 .open = ion_debug_buffer_open,
2392 .read = seq_read,
2393 .llseek = seq_lseek,
2394 .release = single_release,
2395 };
2396
2397 static void ion_debug_event_show_one(struct seq_file *s,
2398 struct ion_eventlog *log)
2399 {
2400 struct timeval tv = ktime_to_timeval(log->begin);
2401 long elapsed = ktime_us_delta(log->done, log->begin);
2402
2403 if (elapsed == 0)
2404 return;
2405
2406 seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);
2407
2408 switch (log->type) {
2409 case ION_EVENT_TYPE_ALLOC:
2410 {
2411 struct ion_event_alloc *data = &log->data.alloc;
2412 seq_printf(s, "%8s %pK %18s %11zd ", "alloc",
2413 data->id, data->heap->name, data->size);
2414 break;
2415 }
2416 case ION_EVENT_TYPE_FREE:
2417 {
2418 struct ion_event_free *data = &log->data.free;
2419 seq_printf(s, "%8s %pK %18s %11zd ", "free",
2420 data->id, data->heap->name, data->size);
2421 break;
2422 }
2423 case ION_EVENT_TYPE_MMAP:
2424 {
2425 struct ion_event_mmap *data = &log->data.mmap;
2426 seq_printf(s, "%8s %pK %18s %11zd ", "mmap",
2427 data->id, data->heap->name, data->size);
2428 break;
2429 }
2430 case ION_EVENT_TYPE_SHRINK:
2431 {
2432 struct ion_event_shrink *data = &log->data.shrink;
2433 seq_printf(s, "%8s %16lx %18s %11zd ", "shrink",
2434 0l, "ion_noncontig_heap", data->size);
2435 elapsed = 0;
2436 break;
2437 }
2438 case ION_EVENT_TYPE_CLEAR:
2439 {
2440 struct ion_event_clear *data = &log->data.clear;
2441 seq_printf(s, "%8s %pK %18s %11zd ", "clear",
2442 data->id, data->heap->name, data->size);
2443 break;
2444 }
2445 }
2446
2447 seq_printf(s, "%9ld", elapsed);
2448
2449 if (elapsed > 100 * USEC_PER_MSEC)
2450 seq_printf(s, " *");
2451
2452 if (log->type == ION_EVENT_TYPE_ALLOC) {
2453 seq_printf(s, " ");
2454 ion_buffer_dump_flags(s, log->data.alloc.flags);
2455 } else if (log->type == ION_EVENT_TYPE_CLEAR) {
2456 seq_printf(s, " ");
2457 ion_buffer_dump_flags(s, log->data.clear.flags);
2458 }
2459
2460 if (log->type == ION_EVENT_TYPE_FREE && log->data.free.shrinker)
2461 seq_printf(s, " shrinker");
2462
2463 seq_printf(s, "\n");
2464 }
2465
2466 static int ion_debug_event_show(struct seq_file *s, void *unused)
2467 {
2468 struct ion_device *dev = s->private;
2469 int index = atomic_read(&dev->event_idx) % ION_EVENT_LOG_MAX;
2470 int last = index;
2471
2472 seq_printf(s, "%13s %10s %8s %18s %11s %10s %24s\n", "timestamp",
2473 "type", "id", "heap", "size", "time (us)", "remarks");
2474 seq_printf(s, "-------------------------------------------");
2475 seq_printf(s, "-------------------------------------------");
2476 seq_printf(s, "-----------------------------------------\n");
2477
2478 do {
2479 if (++index >= ION_EVENT_LOG_MAX)
2480 index = 0;
2481 ion_debug_event_show_one(s, &dev->eventlog[index]);
2482 } while (index != last);
2483
2484 return 0;
2485 }
2486
2487 static int ion_debug_event_open(struct inode *inode, struct file *file)
2488 {
2489 return single_open(file, ion_debug_event_show, inode->i_private);
2490 }
2491
2492 static const struct file_operations debug_event_fops = {
2493 .open = ion_debug_event_open,
2494 .read = seq_read,
2495 .llseek = seq_lseek,
2496 .release = single_release,
2497 };
2498 #endif
2499
2500 struct ion_device *ion_device_create(long (*custom_ioctl)
2501 (struct ion_client *client,
2502 unsigned int cmd,
2503 unsigned long arg))
2504 {
2505 struct ion_device *idev;
2506 int ret;
2507
2508 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2509 if (!idev)
2510 return ERR_PTR(-ENOMEM);
2511
2512 idev->dev.minor = MISC_DYNAMIC_MINOR;
2513 idev->dev.name = "ion";
2514 idev->dev.fops = &ion_fops;
2515 idev->dev.parent = NULL;
2516 ret = misc_register(&idev->dev);
2517 if (ret) {
2518 pr_err("ion: failed to register misc device.\n");
2519 kfree(idev);
2520 return ERR_PTR(ret);
2521 }
2522
2523 idev->debug_root = debugfs_create_dir("ion", NULL);
2524 if (!idev->debug_root) {
2525 pr_err("ion: failed to create debugfs root directory.\n");
2526 goto debugfs_done;
2527 }
2528 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2529 if (!idev->heaps_debug_root) {
2530 pr_err("ion: failed to create debugfs heaps directory.\n");
2531 goto debugfs_done;
2532 }
2533 idev->clients_debug_root = debugfs_create_dir("clients",
2534 idev->debug_root);
2535 if (!idev->clients_debug_root) {
2536 pr_err("ion: failed to create debugfs clients directory.\n");
2537 goto debugfs_done;
2538 }
2539
2540 #ifdef CONFIG_ION_EXYNOS_STAT_LOG
2541 atomic_set(&idev->event_idx, -1);
2542 idev->buffer_debug_file = debugfs_create_file("buffer", 0444,
2543 idev->debug_root, idev,
2544 &debug_buffer_fops);
2545 if (!idev->buffer_debug_file) {
2546 pr_err("%s: failed to create buffer debug file\n", __func__);
2547 goto debugfs_done;
2548 }
2549
2550 idev->event_debug_file = debugfs_create_file("event", 0444,
2551 idev->debug_root, idev,
2552 &debug_event_fops);
2553 if (!idev->event_debug_file)
2554 pr_err("%s: failed to create event debug file\n", __func__);
2555 #endif
2556
2557 debugfs_done:
2558
2559 idev->custom_ioctl = custom_ioctl;
2560 idev->buffers = RB_ROOT;
2561 mutex_init(&idev->buffer_lock);
2562 init_rwsem(&idev->lock);
2563 plist_head_init(&idev->heaps);
2564 idev->clients = RB_ROOT;
2565
2566 /* backup of ion device: assumes there is only one ion device */
2567 g_idev = idev;
2568
2569 return idev;
2570 }
2571 EXPORT_SYMBOL(ion_device_create);
2572
2573 void ion_device_destroy(struct ion_device *dev)
2574 {
2575 misc_deregister(&dev->dev);
2576 debugfs_remove_recursive(dev->debug_root);
2577 kfree(dev);
2578 }
2579 EXPORT_SYMBOL(ion_device_destroy);
2580
2581 void __init ion_reserve(struct ion_platform_data *data)
2582 {
2583 int i;
2584
2585 for (i = 0; i < data->nr; i++) {
2586 if (data->heaps[i].size == 0)
2587 continue;
2588
2589 if (data->heaps[i].base == 0) {
2590 phys_addr_t paddr;
2591
2592 paddr = memblock_alloc_base(data->heaps[i].size,
2593 data->heaps[i].align,
2594 MEMBLOCK_ALLOC_ANYWHERE);
2595 if (!paddr) {
2596 pr_err("%s: error allocating memblock for heap %d\n",
2597 __func__, i);
2598 continue;
2599 }
2600 data->heaps[i].base = paddr;
2601 } else {
2602 int ret = memblock_reserve(data->heaps[i].base,
2603 data->heaps[i].size);
2604 if (ret)
2605 pr_err("memblock reserve of %zx@%lx failed\n",
2606 data->heaps[i].size,
2607 data->heaps[i].base);
2608 }
2609 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2610 data->heaps[i].name,
2611 data->heaps[i].base,
2612 data->heaps[i].size);
2613 }
2614 }
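/*
 * Illustrative only: a minimal sketch of the platform data consumed above,
 * assuming a single carved-out heap.  A base of 0 asks ion_reserve() to
 * pick an address via memblock instead of reserving a fixed region.
 *
 *     static struct ion_platform_heap example_heaps[] = {
 *         {
 *             .name  = "example_carveout",
 *             .base  = 0,
 *             .size  = SZ_16M,
 *             .align = SZ_1M,
 *         },
 *     };
 *
 *     static struct ion_platform_data example_data = {
 *         .nr    = ARRAY_SIZE(example_heaps),
 *         .heaps = example_heaps,
 *     };
 *
 *     ion_reserve(&example_data);
 */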
2615
2616 static struct ion_iovm_map *ion_buffer_iova_create(struct ion_buffer *buffer,
2617 struct device *dev, enum dma_data_direction dir, int prop)
2618 {
2619 /* Must be called with buffer->lock held */
2620 struct ion_iovm_map *iovm_map;
2621 int ret = 0;
2622
2623 iovm_map = kzalloc(sizeof(struct ion_iovm_map), GFP_KERNEL);
2624 if (!iovm_map) {
2625 pr_err("%s: Failed to allocate ion_iovm_map for %s\n",
2626 __func__, dev_name(dev));
2627 return ERR_PTR(-ENOMEM);
2628 }
2629
2630 iovm_map->iova = iovmm_map(dev, buffer->sg_table->sgl,
2631 0, buffer->size, dir, prop);
2632
2633 if (iovm_map->iova == (dma_addr_t)-ENOSYS) {
2634 size_t len;
2635 ion_phys_addr_t addr;
2636
2637 BUG_ON(!buffer->heap->ops->phys);
2638 ret = buffer->heap->ops->phys(buffer->heap, buffer,
2639 &addr, &len);
2640 if (ret)
2641 pr_err("%s: Unable to get PA for %s\n",
2642 __func__, dev_name(dev));
2643 } else if (IS_ERR_VALUE(iovm_map->iova)) {
2644 ret = iovm_map->iova;
2645 pr_err("%s: Unable to allocate IOVA for %s\n",
2646 __func__, dev_name(dev));
2647 }
2648
2649 if (ret) {
2650 kfree(iovm_map);
2651 return ERR_PTR(ret);
2652 }
2653
2654 iovm_map->dev = dev;
2655 iovm_map->domain = get_domain_from_dev(dev);
2656 iovm_map->map_cnt = 1;
2657
2658 pr_debug("%s: new map added for dev %s, iova %pa, prop %d\n", __func__,
2659 dev_name(dev), &iovm_map->iova, prop);
2660
2661 return iovm_map;
2662 }
2663
2664 dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
2665 off_t offset, size_t size,
2666 enum dma_data_direction direction, int prop)
2667 {
2668 struct dma_buf *dmabuf = attachment->dmabuf;
2669 struct ion_buffer *buffer = dmabuf->priv;
2670 struct ion_iovm_map *iovm_map;
2671 struct iommu_domain *domain;
2672
2673 BUG_ON(dmabuf->ops != &dma_buf_ops);
2674
2675 if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
2676 buffer->flags & ION_FLAG_PROTECTED) {
2677 struct ion_buffer_info *info = buffer->priv_virt;
2678
2679 if (info->prot_desc.dma_addr)
2680 return info->prot_desc.dma_addr;
2681 pr_err("%s: protected buffer but no secure iova\n", __func__);
2682 return -EINVAL;
2683 }
2684
2685 domain = get_domain_from_dev(attachment->dev);
2686 if (!domain) {
2687 pr_err("%s: invalid iommu device\n", __func__);
2688 return -EINVAL;
2689 }
2690
2691 mutex_lock(&buffer->lock);
2692 list_for_each_entry(iovm_map, &buffer->iovas, list) {
2693 if (domain == iovm_map->domain) {
2694 iovm_map->map_cnt++;
2695 mutex_unlock(&buffer->lock);
2696 return iovm_map->iova;
2697 }
2698 }
2699
2700 if (!ion_buffer_cached(buffer))
2701 prop &= ~IOMMU_CACHE;
2702
2703 iovm_map = ion_buffer_iova_create(buffer, attachment->dev,
2704 direction, prop);
2705 if (IS_ERR(iovm_map)) {
2706 mutex_unlock(&buffer->lock);
2707 return PTR_ERR(iovm_map);
2708 }
2709
2710 list_add_tail(&iovm_map->list, &buffer->iovas);
2711 mutex_unlock(&buffer->lock);
2712
2713 return iovm_map->iova;
2714 }
2715
2716 void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova)
2717 {
2718 struct ion_iovm_map *iovm_map;
2719 struct dma_buf *dmabuf = attachment->dmabuf;
2720 struct device *dev = attachment->dev;
2721 struct ion_buffer *buffer = attachment->dmabuf->priv;
2722 struct iommu_domain *domain;
2723
2724 BUG_ON(dmabuf->ops != &dma_buf_ops);
2725
2726 if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
2727 buffer->flags & ION_FLAG_PROTECTED)
2728 return;
2729
2730 domain = get_domain_from_dev(attachment->dev);
2731 if (!domain) {
2732 pr_err("%s: invalid iommu device\n", __func__);
2733 return;
2734 }
2735
2736 mutex_lock(&buffer->lock);
2737 list_for_each_entry(iovm_map, &buffer->iovas, list) {
2738 if ((domain == iovm_map->domain) && (iova == iovm_map->iova)) {
2739 if (--iovm_map->map_cnt == 0) {
2740 list_del(&iovm_map->list);
2741 pr_debug("%s: unmap previous %pa for dev %s\n",
2742 __func__, &iovm_map->iova,
2743 dev_name(iovm_map->dev));
2744 iovmm_unmap(iovm_map->dev, iovm_map->iova);
2745 kfree(iovm_map);
2746 }
2747
2748 mutex_unlock(&buffer->lock);
2749 return;
2750 }
2751 }
2752
2753 mutex_unlock(&buffer->lock);
2754
2755 WARN(1, "IOVA %pa is not found for %s\n", &iova, dev_name(dev));
2756 }
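/*
 * Illustrative only: how an Exynos master driver would typically obtain and
 * release an IOVA for an ION dma-buf through the helpers above.  The fd and
 * the IOMMU property bits are assumptions for the sketch.
 *
 *     struct dma_buf *dbuf = dma_buf_get(fd);
 *     struct dma_buf_attachment *att = dma_buf_attach(dbuf, dev);
 *     dma_addr_t iova;
 *
 *     iova = ion_iovmm_map(att, 0, dbuf->size, DMA_BIDIRECTIONAL,
 *                          IOMMU_READ | IOMMU_WRITE);
 *     if (IS_ERR_VALUE(iova))
 *         goto err;
 *
 *     ... program the device with iova ...
 *
 *     ion_iovmm_unmap(att, iova);
 *     dma_buf_detach(dbuf, att);
 *     dma_buf_put(dbuf);
 */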