/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/exynos_iovmm.h>
#include <linux/exynos_ion.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ion.h>	/* ion alloc/free/mmap/sync tracepoints (header path assumed) */

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
#ifdef CONFIG_ION_EXYNOS_STAT_LOG
	struct dentry *buffer_debug_file;
	struct dentry *event_debug_file;
	struct ion_eventlog eventlog[ION_EVENT_LOG_MAX];
	atomic_t event_idx;
#endif
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	unsigned int user_ref_count;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

struct ion_device *g_idev;
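/*
 * The low bit of each entry in buffer->pages doubles as a dirty flag for
 * fault-mapped buffers: the helpers below set, test and clear that bit,
 * while ion_buffer_page() strips it to recover the real struct page pointer.
 */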
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
void ion_debug_heap_usage_show(struct ion_heap *heap)
{
	struct scatterlist *sg;
	struct sg_table *table;
	struct rb_node *n;
	struct ion_device *dev = heap->dev;
	ion_phys_addr_t paddr;
	int i;

	/* show the usage for only contiguous buffer */
	if ((heap->type != ION_HEAP_TYPE_CARVEOUT)
			&& (heap->type != ION_HEAP_TYPE_DMA))
		return;

	pr_err("[HEAP %16s (id %4d) DETAIL USAGE]\n", heap->name, heap->id);

	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		table = buffer->sg_table;
		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			paddr = PFN_PHYS(page_to_pfn(page));
			pr_err("[%16lx--%16lx] %16zu\n",
				paddr, paddr + sg->length, buffer->size);
		}
	}
	mutex_unlock(&dev->buffer_lock);
}
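/*
 * When CONFIG_ION_EXYNOS_STAT_LOG is enabled, allocation, free, mmap,
 * shrink and cache-clear events are recorded in dev->eventlog, a fixed-size
 * ring indexed by the atomic dev->event_idx counter modulo ION_EVENT_LOG_MAX.
 */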
#ifdef CONFIG_ION_EXYNOS_STAT_LOG
static inline void ION_EVENT_ALLOC(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx);
	struct ion_eventlog *log = &dev->eventlog[idx % ION_EVENT_LOG_MAX];
	struct ion_event_alloc *data = &log->data.alloc;

	log->type = ION_EVENT_TYPE_ALLOC;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->flags = buffer->flags;
}

static inline void ION_EVENT_FREE(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_free *data = &log->data.free;

	log->type = ION_EVENT_TYPE_FREE;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->shrinker = (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
}

static inline void ION_EVENT_MMAP(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_mmap *data = &log->data.mmap;

	log->type = ION_EVENT_TYPE_MMAP;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
}

void ION_EVENT_SHRINK(struct ion_device *dev, size_t size)
{
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];

	log->type = ION_EVENT_TYPE_SHRINK;
	log->begin = ktime_get();
	log->done = ktime_set(0, 0);
	log->data.shrink.size = size;
}

void ION_EVENT_CLEAR(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_clear *data = &log->data.clear;

	log->type = ION_EVENT_TYPE_CLEAR;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->flags = buffer->flags;
}
static struct ion_task *ion_buffer_task_lookup(struct ion_buffer *buffer,
					       struct device *master)
{
	bool found = false;
	struct ion_task *task;

	list_for_each_entry(task, &buffer->master_list, list) {
		if (task->master == master) {
			found = true;
			break;
		}
	}

	return found ? task : NULL;
}

static void ion_buffer_set_task_info(struct ion_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->master_list);
	get_task_comm(buffer->task_comm, current->group_leader);
	get_task_comm(buffer->thread_comm, current);
	buffer->pid = task_pid_nr(current->group_leader);
	buffer->tid = task_pid_nr(current);
}

static void ion_buffer_task_add(struct ion_buffer *buffer,
				struct device *master)
{
	struct ion_task *task;

	task = ion_buffer_task_lookup(buffer, master);
	if (!task) {
		task = kzalloc(sizeof(*task), GFP_KERNEL);
		if (task) {
			task->master = master;
			kref_init(&task->ref);
			list_add_tail(&task->list, &buffer->master_list);
		}
	} else {
		kref_get(&task->ref);
	}
}

static void ion_buffer_task_add_lock(struct ion_buffer *buffer,
				     struct device *master)
{
	mutex_lock(&buffer->lock);
	ion_buffer_task_add(buffer, master);
	mutex_unlock(&buffer->lock);
}

static void __ion_buffer_task_remove(struct kref *kref)
{
	struct ion_task *task = container_of(kref, struct ion_task, ref);

	list_del(&task->list);
	kfree(task);
}

static void ion_buffer_task_remove(struct ion_buffer *buffer,
				   struct device *master)
{
	struct ion_task *task, *tmp;

	list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
		if (task->master == master) {
			kref_put(&task->ref, __ion_buffer_task_remove);
			break;
		}
	}
}

static void ion_buffer_task_remove_lock(struct ion_buffer *buffer,
					struct device *master)
{
	mutex_lock(&buffer->lock);
	ion_buffer_task_remove(buffer, master);
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_task_remove_all(struct ion_buffer *buffer)
{
	struct ion_task *task, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
		list_del(&task->list);
		kfree(task);
	}
	mutex_unlock(&buffer->lock);
}
#else
#define ION_EVENT_ALLOC(buffer, begin)			do { } while (0)
#define ION_EVENT_FREE(buffer, begin)			do { } while (0)
#define ION_EVENT_MMAP(buffer, begin)			do { } while (0)
#define ion_buffer_set_task_info(buffer)		do { } while (0)
#define ion_buffer_task_add(buffer, master)		do { } while (0)
#define ion_buffer_task_add_lock(buffer, master)	do { } while (0)
#define ion_buffer_task_remove(buffer, master)		do { } while (0)
#define ion_buffer_task_remove_lock(buffer, master)	do { } while (0)
#define ion_buffer_task_remove_all(buffer)		do { } while (0)
#endif
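/*
 * Buffers live in dev->buffers, an rb-tree keyed by the buffer's kernel
 * address; ion_buffer_add() below inserts a freshly created buffer and
 * records which process and device master created it.
 */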
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);

	ion_buffer_set_task_info(buffer);
	ion_buffer_task_add(buffer, dev->dev.this_device);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = PTR_ERR(table);
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	INIT_LIST_HEAD(&buffer->iovas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_iovm_map *iovm_map;
	struct ion_iovm_map *tmp;

	trace_ion_free_start((unsigned long) buffer, buffer->size,
			buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	list_for_each_entry_safe(iovm_map, tmp, &buffer->iovas, list) {
		iovmm_unmap(iovm_map->dev, iovm_map->iova);
		list_del(&iovm_map->list);
		kfree(iovm_map);
	}

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);

	ion_buffer_task_remove_all(buffer);
	ION_EVENT_FREE(buffer, ION_EVENT_DONE());
	trace_ion_free_end((unsigned long) buffer, buffer->size,
			buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
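/*
 * Handle references come in two flavours: the plain kref manipulated by
 * ion_handle_get()/ion_handle_put*() for in-kernel users, and the
 * user_ref_count managed below for references handed out through the ioctl
 * interface.  The *_check_overflow variants refuse to wrap either counter.
 */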
/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	int ret;

	mutex_lock(&handle->client->lock);
	if (!ion_handle_validate(handle->client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&handle->client->lock);
		return -EINVAL;
	}
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&handle->client->lock);

	return ret;
}
/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/* passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle. */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	struct ion_handle *ret;

	mutex_lock(&client->lock);
	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}
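/*
 * ion_handle_lookup() below walks the client's handle tree, which is keyed
 * by buffer pointer, so importing an already-known buffer can reuse the
 * existing handle instead of creating a duplicate.
 */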
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0) {
		pr_err("%s: Fail to get bad id (ret %d)\n", __func__, id);
		return id;
	}

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

unsigned int ion_parse_heap_id(unsigned int heap_id_mask, unsigned int flags);
static size_t ion_buffer_get_total_size_by_pid(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	pid_t pid = client->pid;
	size_t pid_total_size = 0;
	struct rb_node *n;

	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		mutex_lock(&buffer->lock);
		if (pid == buffer->pid)
			pid_total_size += buffer->size;
		mutex_unlock(&buffer->lock);
	}
	mutex_unlock(&dev->buffer_lock);

	return pid_total_size;
}
static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
				      size_t align, unsigned int heap_id_mask,
				      unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	trace_ion_alloc_start(client->name, 0, len, align, heap_id_mask, flags);

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);
	if (!len) {
		trace_ion_alloc_fail(client->name, EINVAL, len,
				     align, heap_id_mask, flags);
		return ERR_PTR(-EINVAL);
	}

	if (len / PAGE_SIZE > totalram_pages / 4) {
		size_t pid_total_size = ion_buffer_get_total_size_by_pid(client);

		if ((len + pid_total_size) / PAGE_SIZE > totalram_pages / 2) {
			pr_err("%s: len %zu total %zu heap_id_mask %u flags %x\n",
			       __func__, len, pid_total_size, heap_id_mask, flags);
			return ERR_PTR(-EINVAL);
		}
	}

	down_read(&dev->lock);
	heap_id_mask = ion_parse_heap_id(heap_id_mask, flags);
	if (heap_id_mask == 0)
		return ERR_PTR(-EINVAL);

	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_fail(client->name, ENODEV, len,
				     align, heap_id_mask, flags);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_fail(client->name, PTR_ERR(buffer),
				     len, align, heap_id_mask, flags);
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle)) {
		trace_ion_alloc_fail(client->name, (unsigned long) buffer,
				     len, align, heap_id_mask, flags);
		return handle;
	}

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		trace_ion_alloc_fail(client->name, (unsigned long) buffer,
				     len, align, heap_id_mask, flags);
	}

	ION_EVENT_ALLOC(buffer, ION_EVENT_DONE());
	trace_ion_alloc_end(client->name, (unsigned long) buffer,
			    len, align, heap_id_mask, flags);

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);
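/*
 * A minimal sketch of in-kernel usage (illustration only; "idev",
 * "my_heap_id" and the flags below are hypothetical and depend on the
 * platform's heap layout):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_1M, 0,
 *					      1 << my_heap_id, ION_FLAG_CACHED);
 *	int fd = ion_share_dma_buf_fd(client, handle);
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */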
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (!(handle->user_ref_count > 0)) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
			__func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);

	return ret;
}
EXPORT_SYMBOL(ion_phys);
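/*
 * Kernel mappings are reference counted at two levels: buffer->kmap_cnt
 * tracks how many live mappings the buffer has in total, while
 * handle->kmap_cnt tracks how many this particular client holds, so the
 * heap's unmap_kernel() is only called once the last user drops its map.
 */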
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	size_t sizes_pss[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	down_read(&g_idev->lock);

	/* check validity of the client */
	for (n = rb_first(&g_idev->clients); n; n = rb_next(n)) {
		struct ion_client *c = rb_entry(n, struct ion_client, node);

		if (client == c)
			break;
	}

	if (IS_ERR_OR_NULL(n)) {
		pr_err("%s: invalid client %p\n", __func__, client);
		up_read(&g_idev->lock);
		return -EINVAL;
	}

	seq_printf(s, "%16.s %4.s %16.s %4.s %10.s %8.s %9.s\n",
		   "task", "pid", "thread", "tid", "size", "# procs", "flag");
	seq_printf(s, "----------------------------------------------"
		   "--------------------------------------------\n");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		struct ion_buffer *buffer = handle->buffer;
		unsigned int id = buffer->heap->id;

		names[id] = buffer->heap->name;
		sizes[id] += buffer->size;
		sizes_pss[id] += (buffer->size / buffer->handle_count);
		seq_printf(s, "%16.s %4u %16.s %4u %10zu %8d %9lx\n",
			   buffer->task_comm, buffer->pid,
			   buffer->thread_comm, buffer->tid, buffer->size,
			   buffer->handle_count, buffer->flags);
	}
	mutex_unlock(&client->lock);
	up_read(&g_idev->lock);

	seq_printf(s, "----------------------------------------------"
		   "--------------------------------------------\n");
	seq_printf(s, "%16.16s: %16.16s %18.18s\n", "heap_name",
		   "size_in_bytes", "size_in_bytes(pss)");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %18zu\n",
			   names[i], sizes[i], sizes_pss[i]);
	}

	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&client->lock);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_unlock(&client->lock);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);

	return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);

	ion_buffer_task_add_lock(buffer, attachment->dev);

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	ion_buffer_task_remove_lock(attachment->dmabuf->priv, attachment->dev);
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
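/*
 * For fault-mapped cached buffers, ion_buffer_sync_for_device() below
 * flushes only the pages marked dirty since the last sync and then zaps the
 * user mappings, so the next CPU access refaults and re-marks its page dirty.
 */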
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_cached(buffer))
		return;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %pK\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %pK\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
= {
1454 .open
= ion_vm_open
,
1455 .close
= ion_vm_close
,
1456 .fault
= ion_vm_fault
,
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (buffer->flags & ION_FLAG_NOZEROED) {
		pr_err("%s: mmap non-zeroed buffer to user is prohibited!\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_PROTECTED) {
		pr_err("%s: mmap protected buffer to user is prohibited!\n",
		       __func__);
		return -EPERM;
	}

	if ((((vma->vm_pgoff << PAGE_SHIFT) >= buffer->size)) ||
	    ((vma->vm_end - vma->vm_start) >
		     (buffer->size - (vma->vm_pgoff << PAGE_SHIFT)))) {
		pr_err("%s: trying to map outside of buffer.\n", __func__);
		return -EINVAL;
	}

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	trace_ion_mmap_start((unsigned long) buffer, buffer->size,
			!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
		trace_ion_mmap_end((unsigned long) buffer, buffer->size,
				!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
	trace_ion_mmap_end((unsigned long) buffer, buffer->size,
			!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *ptr)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
static void ion_dma_buf_set_privflag(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	buffer->private_flags |= ION_PRIV_FLAG_NEED_TO_FLUSH;
	mutex_unlock(&buffer->lock);
}

static bool ion_dma_buf_get_privflag(struct dma_buf *dmabuf, bool clear)
{
	struct ion_buffer *buffer = dmabuf->priv;
	bool ret;

	mutex_lock(&buffer->lock);
	ret = !!(buffer->private_flags & ION_PRIV_FLAG_NEED_TO_FLUSH);
	if (clear)
		buffer->private_flags &= ~ION_PRIV_FLAG_NEED_TO_FLUSH;
	mutex_unlock(&buffer->lock);

	return ret;
}
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
	.vmap = ion_dma_buf_vmap,
	.vunmap = ion_dma_buf_vunmap,
	.set_privflag = ion_dma_buf_set_privflag,
	.get_privflag = ion_dma_buf_get_privflag,
};
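/*
 * Every buffer exported by ion_share_dma_buf() below carries this ops table,
 * which is also how ion_import_dma_buf() and the sync ioctls recognise a
 * dma-buf as ION-exported (dmabuf->ops == &dma_buf_ops).
 */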
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
int ion_cached_needsync_dmabuf(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	unsigned long cacheflag = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

	if (dmabuf->ops != &dma_buf_ops)
		return -EINVAL;

	return ((buffer->flags & cacheflag) == cacheflag) ? 1 : 0;
}
EXPORT_SYMBOL(ion_cached_needsync_dmabuf);

bool ion_may_hwrender_dmabuf(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	if (dmabuf->ops != &dma_buf_ops) {
		WARN(1, "%s: given dmabuf is not exported by ION\n", __func__);
		return false;
	}

	return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
}
EXPORT_SYMBOL(ion_may_hwrender_dmabuf);

bool ion_may_hwrender_handle(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed\n", __func__);
		mutex_unlock(&client->lock);
		return false;
	}
	mutex_unlock(&client->lock);

	return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
}
EXPORT_SYMBOL(ion_may_hwrender_handle);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct scatterlist *sg, *sgl;
	int nelems;
	void *vaddr;
	int i;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	if (!ion_buffer_cached(buffer) ||
			ion_buffer_fault_user_mappings(buffer)) {
		dma_buf_put(dmabuf);
		return 0;
	}

	trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
				DMA_BIDIRECTIONAL, buffer->size,
				buffer->vaddr, 0, false);

	sgl = buffer->sg_table->sgl;
	nelems = buffer->sg_table->nents;

	for_each_sg(sgl, sg, nelems, i) {
		vaddr = phys_to_virt(sg_phys(sg));
		__dma_flush_range(vaddr, vaddr + sg->length);
	}

	trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
				DMA_BIDIRECTIONAL, buffer->size,
				buffer->vaddr, 0, false);

	dma_buf_put(dmabuf);

	return 0;
}
static int ion_sync_partial_for_device(struct ion_client *client, int fd,
				       off_t offset, size_t len)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct scatterlist *sg, *sgl;
	size_t remained = len;
	int nelems;
	int i;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	if (!ion_buffer_cached(buffer) ||
			ion_buffer_fault_user_mappings(buffer)) {
		dma_buf_put(dmabuf);
		return 0;
	}

	trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
				DMA_BIDIRECTIONAL, buffer->size,
				buffer->vaddr, 0, false);

	sgl = buffer->sg_table->sgl;
	nelems = buffer->sg_table->nents;

	for_each_sg(sgl, sg, nelems, i) {
		size_t len_to_flush;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		}

		len_to_flush = sg->length - offset;
		if (remained < len_to_flush)
			len_to_flush = remained;

		remained -= len_to_flush;

		__dma_map_area(phys_to_virt(sg_phys(sg)) + offset,
				len_to_flush, DMA_TO_DEVICE);

		offset = 0;
		if (remained == 0)
			break;
	}

	trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
				DMA_BIDIRECTIONAL, buffer->size,
				buffer->vaddr, 0, false);

	dma_buf_put(dmabuf);

	return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_SYNC_PARTIAL:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
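/*
 * ion_ioctl() below copies the per-command argument struct through a union:
 * _IOC_WRITE commands are copied in from user space first, _IOC_READ results
 * are copied back out at the end, and a failed copy-out releases the handle
 * that ION_IOC_ALLOC just passed to user space via cleanup_handle.
 */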
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_fd_partial_data fd_partial;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = __ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags, true);
		if (IS_ERR(handle)) {
			pr_err("%s: len %zu align %zu heap_id_mask %u flags %x (ret %ld)\n",
				__func__, data.allocation.len,
				data.allocation.align,
				data.allocation.heap_id_mask,
				data.allocation.flags, PTR_ERR(handle));
			return PTR_ERR(handle);
		}
		pass_to_user(handle);
		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		user_ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
		} else {
			handle = pass_to_user(handle);
			if (IS_ERR(handle))
				ret = PTR_ERR(handle);
			else
				data.handle.handle = handle->id;
		}
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_SYNC_PARTIAL:
	{
		ret = ion_sync_partial_for_device(client, data.fd_partial.fd,
			data.fd_partial.offset, data.fd_partial.len);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle) {
				mutex_lock(&client->lock);
				user_ion_free_nolock(client, cleanup_handle);
				ion_handle_put_nolock(cleanup_handle);
				mutex_unlock(&client->lock);
			}
			return -EFAULT;
		}
	}
	if (cleanup_handle)
		ion_handle_put(cleanup_handle);

	return ret;
}
*inode
, struct file
*file
)
2048 struct ion_client
*client
= file
->private_data
;
2050 pr_debug("%s: %d\n", __func__
, __LINE__
);
2051 ion_client_destroy(client
);
2055 static int ion_open(struct inode
*inode
, struct file
*file
)
2057 struct miscdevice
*miscdev
= file
->private_data
;
2058 struct ion_device
*dev
= container_of(miscdev
, struct ion_device
, dev
);
2059 struct ion_client
*client
;
2060 char debug_name
[64];
2062 pr_debug("%s: %d\n", __func__
, __LINE__
);
2063 snprintf(debug_name
, 64, "%u", task_pid_nr(current
->group_leader
));
2064 client
= ion_client_create(dev
, debug_name
);
2066 return PTR_ERR(client
);
2067 file
->private_data
= client
;
2072 static const struct file_operations ion_fops
= {
2073 .owner
= THIS_MODULE
,
2075 .release
= ion_release
,
2076 .unlocked_ioctl
= ion_ioctl
,
2077 .compat_ioctl
= compat_ion_ioctl
,
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	down_read(&dev->lock);

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	up_read(&dev->lock);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
			path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
				path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
#ifdef CONFIG_ION_EXYNOS_STAT_LOG

#define MAX_DUMP_TASKS		8
#define MAX_DUMP_NAME_LEN	32
#define MAX_DUMP_BUFF_LEN	512

static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
{
	if ((flags & ION_FLAG_CACHED) && !(flags & ION_FLAG_CACHED_NEEDS_SYNC))
		seq_printf(s, "cached|faultmap");
	else if (flags & ION_FLAG_CACHED)
		seq_printf(s, "cached|needsync");
	else
		seq_printf(s, "noncached");

	if (flags & ION_FLAG_NOZEROED)
		seq_printf(s, "|nozeroed");

	if (flags & ION_FLAG_PROTECTED)
		seq_printf(s, "|protected");
}
static void ion_buffer_dump_tasks(struct ion_buffer *buffer, char *str)
{
	struct ion_task *task, *tmp;
	const char *delim = "|";
	size_t total_len = 0;
	int count = 0;

	list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
		const char *name;
		size_t len = strlen(dev_name(task->master));

		if (len > MAX_DUMP_NAME_LEN)
			len = MAX_DUMP_NAME_LEN;
		if (!strncmp(dev_name(task->master), "ion", len)) {
			name = dev_name(task->master);
		} else {
			/* skip the first 9 characters of the master name */
			name = dev_name(task->master) + 9;
			len -= 9;
		}

		if (total_len + len + 1 > MAX_DUMP_BUFF_LEN)
			break;

		strncat((char *)(str + total_len), name, len);
		total_len += len;
		if (!list_is_last(&task->list, &buffer->master_list))
			str[total_len++] = *delim;

		if (++count > MAX_DUMP_TASKS)
			break;
	}
}
static int ion_debug_buffer_show(struct seq_file *s, void *unused)
{
	struct ion_device *dev = s->private;
	struct rb_node *n;
	char *master_name;
	size_t total_size = 0;

	master_name = kzalloc(MAX_DUMP_BUFF_LEN, GFP_KERNEL);
	if (!master_name) {
		pr_err("%s: no memory for client string buffer\n", __func__);
		return -ENOMEM;
	}

	seq_printf(s, "%20.s %16.s %4.s %16.s %4.s %10.s %4.s %3.s %6.s "
		   "%24.s %9.s\n",
		   "heap", "task", "pid", "thread", "tid",
		   "size", "kmap", "ref", "handle",
		   "master", "flags");
	seq_printf(s, "------------------------------------------"
		   "----------------------------------------"
		   "----------------------------------------"
		   "--------------------------------------\n");

	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);

		mutex_lock(&buffer->lock);
		ion_buffer_dump_tasks(buffer, master_name);
		total_size += buffer->size;
		seq_printf(s, "%20.s %16.s %4u %16.s %4u %10zu %4d %3d %6d "
			   "%24.s %9lx", buffer->heap->name,
			   buffer->task_comm, buffer->pid,
			   buffer->thread_comm,
			   buffer->tid, buffer->size, buffer->kmap_cnt,
			   atomic_read(&buffer->ref.refcount),
			   buffer->handle_count, master_name,
			   buffer->flags);
		seq_printf(s, "(");
		ion_buffer_dump_flags(s, buffer->flags);
		seq_printf(s, ")\n");
		mutex_unlock(&buffer->lock);

		memset(master_name, 0, MAX_DUMP_BUFF_LEN);
	}
	mutex_unlock(&dev->buffer_lock);

	seq_printf(s, "------------------------------------------"
		   "----------------------------------------"
		   "----------------------------------------"
		   "--------------------------------------\n");
	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
	seq_printf(s, "------------------------------------------"
		   "----------------------------------------"
		   "----------------------------------------"
		   "--------------------------------------\n");

	kfree(master_name);

	return 0;
}
static int ion_debug_buffer_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_buffer_show, inode->i_private);
}
static const struct file_operations debug_buffer_fops = {
	.open = ion_debug_buffer_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void ion_debug_event_show_one(struct seq_file *s,
				     struct ion_eventlog *log)
{
	struct timeval tv = ktime_to_timeval(log->begin);
	long elapsed = ktime_us_delta(log->done, log->begin);

	seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);

	switch (log->type) {
	case ION_EVENT_TYPE_ALLOC:
	{
		struct ion_event_alloc *data = &log->data.alloc;

		seq_printf(s, "%8s %pK %18s %11zd ", "alloc",
			   data->id, data->heap->name, data->size);
		break;
	}
	case ION_EVENT_TYPE_FREE:
	{
		struct ion_event_free *data = &log->data.free;

		seq_printf(s, "%8s %pK %18s %11zd ", "free",
			   data->id, data->heap->name, data->size);
		break;
	}
	case ION_EVENT_TYPE_MMAP:
	{
		struct ion_event_mmap *data = &log->data.mmap;

		seq_printf(s, "%8s %pK %18s %11zd ", "mmap",
			   data->id, data->heap->name, data->size);
		break;
	}
	case ION_EVENT_TYPE_SHRINK:
	{
		struct ion_event_shrink *data = &log->data.shrink;

		seq_printf(s, "%8s %16lx %18s %11zd ", "shrink",
			   0l, "ion_noncontig_heap", data->size);
		break;
	}
	case ION_EVENT_TYPE_CLEAR:
	{
		struct ion_event_clear *data = &log->data.clear;

		seq_printf(s, "%8s %pK %18s %11zd ", "clear",
			   data->id, data->heap->name, data->size);
		break;
	}
	}

	seq_printf(s, "%9ld", elapsed);

	if (elapsed > 100 * USEC_PER_MSEC)
		seq_printf(s, " *");

	if (log->type == ION_EVENT_TYPE_ALLOC) {
		seq_printf(s, " ");
		ion_buffer_dump_flags(s, log->data.alloc.flags);
	} else if (log->type == ION_EVENT_TYPE_CLEAR) {
		seq_printf(s, " ");
		ion_buffer_dump_flags(s, log->data.clear.flags);
	}

	if (log->type == ION_EVENT_TYPE_FREE && log->data.free.shrinker)
		seq_printf(s, " shrinker");

	seq_printf(s, "\n");
}
static int ion_debug_event_show(struct seq_file *s, void *unused)
{
	struct ion_device *dev = s->private;
	int index = atomic_read(&dev->event_idx) % ION_EVENT_LOG_MAX;
	int last = index;

	seq_printf(s, "%13s %10s %8s %18s %11s %10s %24s\n", "timestamp",
		   "type", "id", "heap", "size", "time (us)", "remarks");
	seq_printf(s, "-------------------------------------------");
	seq_printf(s, "-------------------------------------------");
	seq_printf(s, "-----------------------------------------\n");

	do {
		if (++index >= ION_EVENT_LOG_MAX)
			index = 0;
		ion_debug_event_show_one(s, &dev->eventlog[index]);
	} while (index != last);

	return 0;
}
static int ion_debug_event_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_event_show, inode->i_private);
}
static const struct file_operations debug_event_fops = {
	.open = ion_debug_event_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#endif /* CONFIG_ION_EXYNOS_STAT_LOG */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root) {
		pr_err("ion: failed to create debugfs clients directory.\n");
		goto debugfs_done;
	}

#ifdef CONFIG_ION_EXYNOS_STAT_LOG
	atomic_set(&idev->event_idx, -1);
	idev->buffer_debug_file = debugfs_create_file("buffer", 0444,
						      idev->debug_root, idev,
						      &debug_buffer_fops);
	if (!idev->buffer_debug_file) {
		pr_err("%s: failed to create buffer debug file\n", __func__);
		goto debugfs_done;
	}

	idev->event_debug_file = debugfs_create_file("event", 0444,
						     idev->debug_root, idev,
						     &debug_event_fops);
	if (!idev->event_debug_file)
		pr_err("%s: failed to create event debug file\n", __func__);
#endif

debugfs_done:
	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;

	/* backup of ion device: assumes there is only one ion device */

	return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
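/*
 * Typical pairing of the two calls above, sketched from the point of view of
 * the platform wrapper that instantiates the device (the custom_ioctl handler
 * name is illustrative, not defined here):
 *
 *	idev = ion_device_create(exynos_custom_ioctl);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	...
 *	ion_device_destroy(idev);
 *
 * ion_device_create() returns an ERR_PTR() on failure, so callers must test
 * the result with IS_ERR() rather than comparing against NULL.
 */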
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}
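/*
 * Illustrative ion_platform_data for ion_reserve() (all values made up):
 * an entry with .base == 0 has its carveout allocated from memblock, while
 * an entry with a fixed .base only has that range reserved.
 *
 *	static struct ion_platform_heap example_heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.id = 1,
 *			.name = "example_carveout",
 *			.size = SZ_16M,
 *			.align = SZ_1M,
 *		},
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.id = 2,
 *			.name = "example_fixed",
 *			.base = 0x40000000,
 *			.size = SZ_8M,
 *		},
 *	};
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr = ARRAY_SIZE(example_heaps),
 *		.heaps = example_heaps,
 *	};
 */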
static struct ion_iovm_map *ion_buffer_iova_create(struct ion_buffer *buffer,
		struct device *dev, enum dma_data_direction dir, int prop)
{
	/* Must be called under buffer->lock held */
	struct ion_iovm_map *iovm_map;
	int ret = 0;

	iovm_map = kzalloc(sizeof(struct ion_iovm_map), GFP_KERNEL);
	if (!iovm_map) {
		pr_err("%s: Failed to allocate ion_iovm_map for %s\n",
		       __func__, dev_name(dev));
		return ERR_PTR(-ENOMEM);
	}

	iovm_map->iova = iovmm_map(dev, buffer->sg_table->sgl,
				   0, buffer->size, dir, prop);

	if (iovm_map->iova == (dma_addr_t)-ENOSYS) {
		size_t len;
		ion_phys_addr_t addr;

		/* no IOMMU behind this master: fall back to the physical address */
		BUG_ON(!buffer->heap->ops->phys);
		ret = buffer->heap->ops->phys(buffer->heap, buffer,
					      &addr, &len);
		if (ret)
			pr_err("%s: Unable to get PA for %s\n",
			       __func__, dev_name(dev));
		else
			iovm_map->iova = (dma_addr_t)addr;
	} else if (IS_ERR_VALUE(iovm_map->iova)) {
		ret = iovm_map->iova;
		pr_err("%s: Unable to allocate IOVA for %s\n",
		       __func__, dev_name(dev));
	}

	if (ret) {
		kfree(iovm_map);
		return ERR_PTR(ret);
	}

	iovm_map->dev = dev;
	iovm_map->domain = get_domain_from_dev(dev);
	iovm_map->map_cnt = 1;

	pr_debug("%s: new map added for dev %s, iova %pa, prop %d\n", __func__,
		 dev_name(dev), &iovm_map->iova, prop);

	return iovm_map;
}
dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
			 off_t offset, size_t size,
			 enum dma_data_direction direction, int prop)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_iovm_map *iovm_map;
	struct iommu_domain *domain;

	BUG_ON(dmabuf->ops != &dma_buf_ops);

	if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
	    buffer->flags & ION_FLAG_PROTECTED) {
		struct ion_buffer_info *info = buffer->priv_virt;

		if (info->prot_desc.dma_addr)
			return info->prot_desc.dma_addr;
		pr_err("%s: protected buffer but no secure iova\n", __func__);
		return -EINVAL;
	}

	domain = get_domain_from_dev(attachment->dev);
	if (!domain) {
		pr_err("%s: invalid iommu device\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(iovm_map, &buffer->iovas, list) {
		if (domain == iovm_map->domain) {
			iovm_map->map_cnt++;
			mutex_unlock(&buffer->lock);
			return iovm_map->iova;
		}
	}

	if (!ion_buffer_cached(buffer))
		prop &= ~IOMMU_CACHE;

	iovm_map = ion_buffer_iova_create(buffer, attachment->dev,
					  direction, prop);
	if (IS_ERR(iovm_map)) {
		mutex_unlock(&buffer->lock);
		return PTR_ERR(iovm_map);
	}

	list_add_tail(&iovm_map->list, &buffer->iovas);
	mutex_unlock(&buffer->lock);

	return iovm_map->iova;
}
void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova)
{
	struct ion_iovm_map *iovm_map;
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct device *dev = attachment->dev;
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct iommu_domain *domain;

	BUG_ON(dmabuf->ops != &dma_buf_ops);

	if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
	    buffer->flags & ION_FLAG_PROTECTED)
		return;

	domain = get_domain_from_dev(attachment->dev);
	if (!domain) {
		pr_err("%s: invalid iommu device\n", __func__);
		return;
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(iovm_map, &buffer->iovas, list) {
		if ((domain == iovm_map->domain) && (iova == iovm_map->iova)) {
			if (--iovm_map->map_cnt == 0) {
				list_del(&iovm_map->list);
				pr_debug("%s: unmap previous %pa for dev %s\n",
					 __func__, &iovm_map->iova,
					 dev_name(iovm_map->dev));
				iovmm_unmap(iovm_map->dev, iovm_map->iova);
				kfree(iovm_map);
			}

			mutex_unlock(&buffer->lock);
			return;
		}
	}
	mutex_unlock(&buffer->lock);

	WARN(1, "IOVA %pa is not found for %s\n", &iova, dev_name(dev));
}
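/*
 * Rough usage sketch for ion_iovmm_map()/ion_iovmm_unmap() from a driver that
 * has imported an ION dma-buf (local variable names are illustrative):
 *
 *	attachment = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 *	iova = ion_iovmm_map(attachment, 0, size, DMA_BIDIRECTIONAL, 0);
 *	...
 *	ion_iovmm_unmap(attachment, iova);
 *	dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attachment);
 *
 * Mappings are reference counted per (buffer, IOMMU domain) pair, so repeated
 * maps from the same domain return the same IOVA until the matching unmaps
 * drop the count to zero and iovmm_unmap() is called.
 */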