ion: silence logical-not-parentheses warning
exynos8895/android_kernel_samsung_universal8895.git: drivers/staging/android/ion/ion.c
/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/exynos_iovmm.h>
#include <linux/exynos_ion.h>
#include <linux/highmem.h>

#include "ion.h"
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;

#ifdef CONFIG_ION_EXYNOS_STAT_LOG
	/* event log */
	struct dentry *buffer_debug_file;
	struct dentry *event_debug_file;
	struct ion_eventlog eventlog[ION_EVENT_LOG_MAX];
	atomic_t event_idx;
#endif
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @user_ref_count:	reference count held on behalf of userspace
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	unsigned int user_ref_count;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

struct ion_device *g_idev;

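/*
 * For buffers that fault in their user mappings, the low bit of each entry
 * in buffer->pages doubles as a per-page dirty flag; the helpers below
 * mask, test, set and clear that bit.
 */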
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

void ion_debug_heap_usage_show(struct ion_heap *heap)
{
	struct scatterlist *sg;
	struct sg_table *table;
	struct rb_node *n;
	struct page *page;
	struct ion_device *dev = heap->dev;
	int i;
	ion_phys_addr_t paddr;

	/* show the usage for only contiguous buffer */
	if ((heap->type != ION_HEAP_TYPE_CARVEOUT)
			&& (heap->type != ION_HEAP_TYPE_DMA))
		return;

	pr_err("[HEAP %16s (id %4d) DETAIL USAGE]\n", heap->name, heap->id);

	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		table = buffer->sg_table;
		for_each_sg(table->sgl, sg, table->nents, i) {
			page = sg_page(sg);
			paddr = PFN_PHYS(page_to_pfn(page));
			pr_err("[%16lx--%16lx] %16zu\n",
			       paddr, paddr + sg->length, buffer->size);
		}
	}
	mutex_unlock(&dev->buffer_lock);
}

#ifdef CONFIG_ION_EXYNOS_STAT_LOG
static inline void ION_EVENT_ALLOC(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx);
	struct ion_eventlog *log = &dev->eventlog[idx % ION_EVENT_LOG_MAX];
	struct ion_event_alloc *data = &log->data.alloc;

	log->type = ION_EVENT_TYPE_ALLOC;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->flags = buffer->flags;
}

static inline void ION_EVENT_FREE(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_free *data = &log->data.free;

	log->type = ION_EVENT_TYPE_FREE;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->shrinker = (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
}

static inline void ION_EVENT_MMAP(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_mmap *data = &log->data.mmap;

	log->type = ION_EVENT_TYPE_MMAP;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
}

void ION_EVENT_SHRINK(struct ion_device *dev, size_t size)
{
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];

	log->type = ION_EVENT_TYPE_SHRINK;
	log->begin = ktime_get();
	log->done = ktime_set(0, 0);
	log->data.shrink.size = size;
}

void ION_EVENT_CLEAR(struct ion_buffer *buffer, ktime_t begin)
{
	struct ion_device *dev = buffer->dev;
	int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
	struct ion_eventlog *log = &dev->eventlog[idx];
	struct ion_event_clear *data = &log->data.clear;

	log->type = ION_EVENT_TYPE_CLEAR;
	log->begin = begin;
	log->done = ktime_get();
	data->id = buffer;
	data->heap = buffer->heap;
	data->size = buffer->size;
	data->flags = buffer->flags;
}

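/*
 * The ion_task helpers below keep a per-buffer list of "master" devices
 * (DMA-buf attachments) that currently reference the buffer; the list is
 * maintained only under CONFIG_ION_EXYNOS_STAT_LOG, for debugging and
 * statistics.
 */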
static struct ion_task *ion_buffer_task_lookup(struct ion_buffer *buffer,
					       struct device *master)
{
	bool found = false;
	struct ion_task *task;

	list_for_each_entry(task, &buffer->master_list, list) {
		if (task->master == master) {
			found = true;
			break;
		}
	}

	return found ? task : NULL;
}

static void ion_buffer_set_task_info(struct ion_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->master_list);
	get_task_comm(buffer->task_comm, current->group_leader);
	get_task_comm(buffer->thread_comm, current);
	buffer->pid = task_pid_nr(current->group_leader);
	buffer->tid = task_pid_nr(current);
}

static void ion_buffer_task_add(struct ion_buffer *buffer,
				struct device *master)
{
	struct ion_task *task;

	task = ion_buffer_task_lookup(buffer, master);
	if (!task) {
		task = kzalloc(sizeof(*task), GFP_KERNEL);
		if (task) {
			task->master = master;
			kref_init(&task->ref);
			list_add_tail(&task->list, &buffer->master_list);
		}
	} else {
		kref_get(&task->ref);
	}
}

static void ion_buffer_task_add_lock(struct ion_buffer *buffer,
				     struct device *master)
{
	mutex_lock(&buffer->lock);
	ion_buffer_task_add(buffer, master);
	mutex_unlock(&buffer->lock);
}

static void __ion_buffer_task_remove(struct kref *kref)
{
	struct ion_task *task = container_of(kref, struct ion_task, ref);

	list_del(&task->list);
	kfree(task);
}

static void ion_buffer_task_remove(struct ion_buffer *buffer,
				   struct device *master)
{
	struct ion_task *task, *tmp;

	list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
		if (task->master == master) {
			kref_put(&task->ref, __ion_buffer_task_remove);
			break;
		}
	}
}

static void ion_buffer_task_remove_lock(struct ion_buffer *buffer,
					struct device *master)
{
	mutex_lock(&buffer->lock);
	ion_buffer_task_remove(buffer, master);
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_task_remove_all(struct ion_buffer *buffer)
{
	struct ion_task *task, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
		list_del(&task->list);
		kfree(task);
	}
	mutex_unlock(&buffer->lock);
}
#else
#define ION_EVENT_ALLOC(buffer, begin)			do { } while (0)
#define ION_EVENT_FREE(buffer, begin)			do { } while (0)
#define ION_EVENT_MMAP(buffer, begin)			do { } while (0)
#define ion_buffer_set_task_info(buffer)		do { } while (0)
#define ion_buffer_task_add(buffer, master)		do { } while (0)
#define ion_buffer_task_add_lock(buffer, master)	do { } while (0)
#define ion_buffer_task_remove(buffer, master)		do { } while (0)
#define ion_buffer_task_remove_lock(buffer, master)	do { } while (0)
#define ion_buffer_task_remove_all(buffer)		do { } while (0)
#endif

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);

	ion_buffer_set_task_info(buffer);
	ion_buffer_task_add(buffer, dev->dev.this_device);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->size = len;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	INIT_LIST_HEAD(&buffer->iovas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_iovm_map *iovm_map;
	struct ion_iovm_map *tmp;

	ION_EVENT_BEGIN();
	trace_ion_free_start((unsigned long) buffer, buffer->size,
			     buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	list_for_each_entry_safe(iovm_map, tmp, &buffer->iovas, list) {
		iovmm_unmap(iovm_map->dev, iovm_map->iova);
		list_del(&iovm_map->list);
		kfree(iovm_map);
	}

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);

	ion_buffer_task_remove_all(buffer);
	ION_EVENT_FREE(buffer, ION_EVENT_DONE());
	trace_ion_free_end((unsigned long) buffer, buffer->size,
			   buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	int ret;

	mutex_lock(&handle->client->lock);
	if (!ion_handle_validate(handle->client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&handle->client->lock);
		return -EINVAL;
	}

	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&handle->client->lock);

	return ret;
}

/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/*
 * Passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	struct ion_handle *ret;

	mutex_lock(&client->lock);
	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);
	return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0) {
		pr_err("%s: failed to allocate handle id (ret %d)\n",
		       __func__, id);
		return id;
	}

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

unsigned int ion_parse_heap_id(unsigned int heap_id_mask, unsigned int flags);

static size_t ion_buffer_get_total_size_by_pid(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	pid_t pid = client->pid;
	size_t pid_total_size = 0;
	struct rb_node *n;

	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		mutex_lock(&buffer->lock);
		if (pid == buffer->pid)
			pid_total_size += buffer->size;
		mutex_unlock(&buffer->lock);
	}
	mutex_unlock(&dev->buffer_lock);

	return pid_total_size;
}

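/*
 * Allocation policy used by __ion_alloc() below: a request larger than a
 * quarter of system RAM is refused when the requesting process would then
 * hold more than half of system RAM in ION buffers.
 */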
static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
				      size_t align, unsigned int heap_id_mask,
				      unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	ION_EVENT_BEGIN();
	trace_ion_alloc_start(client->name, 0, len, align, heap_id_mask, flags);

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);
	if (WARN_ON(!len)) {
		trace_ion_alloc_fail(client->name, EINVAL, len,
				     align, heap_id_mask, flags);
		return ERR_PTR(-EINVAL);
	}

	if (len / PAGE_SIZE > totalram_pages / 4) {
		size_t pid_total_size = ion_buffer_get_total_size_by_pid(client);

		if ((len + pid_total_size) / PAGE_SIZE > totalram_pages / 2) {
			pr_err("%s: len %zu total %zu heap_id_mask %u flags %x\n",
			       __func__, len, pid_total_size, heap_id_mask, flags);
			return ERR_PTR(-EINVAL);
		}
	}

	down_read(&dev->lock);
	heap_id_mask = ion_parse_heap_id(heap_id_mask, flags);
	if (heap_id_mask == 0) {
		/* drop the device lock before bailing out */
		up_read(&dev->lock);
		return ERR_PTR(-EINVAL);
	}

	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_fail(client->name, ENODEV, len,
				     align, heap_id_mask, flags);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_fail(client->name, PTR_ERR(buffer),
				     len, align, heap_id_mask, flags);
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle)) {
		trace_ion_alloc_fail(client->name, (unsigned long) buffer,
				     len, align, heap_id_mask, flags);
		return handle;
	}

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		trace_ion_alloc_fail(client->name, (unsigned long) buffer,
				     len, align, heap_id_mask, flags);
	}

	ION_EVENT_ALLOC(buffer, ION_EVENT_DONE());
	trace_ion_alloc_end(client->name, (unsigned long) buffer,
			    len, align, heap_id_mask, flags);

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (!(handle->user_ref_count > 0)) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;

	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	size_t sizes_pss[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	down_read(&g_idev->lock);

	/* check validity of the client */
	for (n = rb_first(&g_idev->clients); n; n = rb_next(n)) {
		struct ion_client *c = rb_entry(n, struct ion_client, node);
		if (client == c)
			break;
	}

	if (IS_ERR_OR_NULL(n)) {
		pr_err("%s: invalid client %p\n", __func__, client);
		up_read(&g_idev->lock);
		return -EINVAL;
	}

	seq_printf(s, "%16.s %4.s %16.s %4.s %10.s %8.s %9.s\n",
		   "task", "pid", "thread", "tid", "size", "# procs", "flag");
	seq_printf(s, "----------------------------------------------"
		   "--------------------------------------------\n");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		struct ion_buffer *buffer = handle->buffer;
		unsigned int id = buffer->heap->id;

		if (!names[id])
			names[id] = buffer->heap->name;
		sizes[id] += buffer->size;
		sizes_pss[id] += (buffer->size / buffer->handle_count);
		seq_printf(s, "%16.s %4u %16.s %4u %10zu %8d %9lx\n",
			   buffer->task_comm, buffer->pid,
			   buffer->thread_comm, buffer->tid, buffer->size,
			   buffer->handle_count, buffer->flags);
	}
	mutex_unlock(&client->lock);
	up_read(&g_idev->lock);

	seq_printf(s, "----------------------------------------------"
		   "--------------------------------------------\n");
	seq_printf(s, "%16.16s: %16.16s %18.18s\n", "heap_name",
		   "size_in_bytes", "size_in_bytes(pss)");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %18zu\n",
			   names[i], sizes[i], sizes_pss[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

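/*
 * Clients created with the same name get an increasing serial number so
 * that each debugfs entry ("name-serial") stays unique.
 */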
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&client->lock);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	mutex_unlock(&client->lock);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);

	ion_buffer_task_add_lock(buffer, attachment->dev);

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	ion_buffer_task_remove_lock(attachment->dmabuf->priv, attachment->dev);
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

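/*
 * For cached buffers that fault in their user mappings, only the pages
 * marked dirty are synced for the device, and existing user mappings are
 * zapped so that later CPU accesses fault in again and re-mark the pages
 * dirty.
 */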
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_cached(buffer))
		return;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %pK\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %pK\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	ION_EVENT_BEGIN();

	if (buffer->flags & ION_FLAG_NOZEROED) {
		pr_err("%s: mmap non-zeroed buffer to user is prohibited!\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_PROTECTED) {
		pr_err("%s: mmap protected buffer to user is prohibited!\n",
		       __func__);
		return -EPERM;
	}

	if ((((vma->vm_pgoff << PAGE_SHIFT) >= buffer->size)) ||
	    ((vma->vm_end - vma->vm_start) >
	     (buffer->size - (vma->vm_pgoff << PAGE_SHIFT)))) {
		pr_err("%s: trying to map outside of buffer.\n", __func__);
		return -EINVAL;
	}

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	trace_ion_mmap_start((unsigned long) buffer, buffer->size,
			     !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
		trace_ion_mmap_end((unsigned long) buffer, buffer->size,
				   !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
	trace_ion_mmap_end((unsigned long) buffer, buffer->size,
			   !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *ptr)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static void ion_dma_buf_set_privflag(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	buffer->private_flags |= ION_PRIV_FLAG_NEED_TO_FLUSH;
	mutex_unlock(&buffer->lock);
}

static bool ion_dma_buf_get_privflag(struct dma_buf *dmabuf, bool clear)
{
	struct ion_buffer *buffer = dmabuf->priv;
	bool ret;

	mutex_lock(&buffer->lock);
	ret = !!(buffer->private_flags & ION_PRIV_FLAG_NEED_TO_FLUSH);
	if (clear)
		buffer->private_flags &= ~ION_PRIV_FLAG_NEED_TO_FLUSH;
	mutex_unlock(&buffer->lock);

	return ret;
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
	.vmap = ion_dma_buf_vmap,
	.vunmap = ion_dma_buf_vunmap,
	.set_privflag = ion_dma_buf_set_privflag,
	.get_privflag = ion_dma_buf_get_privflag,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

int ion_cached_needsync_dmabuf(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	unsigned long cacheflag = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

	if (dmabuf->ops != &dma_buf_ops)
		return -EINVAL;

	return ((buffer->flags & cacheflag) == cacheflag) ? 1 : 0;
}
EXPORT_SYMBOL(ion_cached_needsync_dmabuf);

bool ion_may_hwrender_dmabuf(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	if (dmabuf->ops != &dma_buf_ops) {
		WARN(1, "%s: given dmabuf is not exported by ION\n", __func__);
		return false;
	}

	return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
}
EXPORT_SYMBOL(ion_may_hwrender_dmabuf);

bool ion_may_hwrender_handle(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed\n", __func__);
		mutex_unlock(&client->lock);
		return false;
	}
	mutex_unlock(&client->lock);

	return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
}
EXPORT_SYMBOL(ion_may_hwrender_handle);

0b9ec1cf
RSZ
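/*
 * Editor's note: full-buffer cache flush behind ION_IOC_SYNC. Uncached
 * buffers and buffers that synchronize through faulted user mappings are
 * skipped; otherwise every scatterlist entry is flushed through its kernel
 * linear mapping (phys_to_virt()), so the pages are assumed to be lowmem.
 */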
1784static int ion_sync_for_device(struct ion_client *client, int fd)
1785{
1786 struct dma_buf *dmabuf;
1787 struct ion_buffer *buffer;
1cac41cb
MB
1788 struct scatterlist *sg, *sgl;
1789 int nelems;
1790 void *vaddr;
1791 int i = 0;
0b9ec1cf
RSZ
1792
1793 dmabuf = dma_buf_get(fd);
9e907654 1794 if (IS_ERR(dmabuf))
0b9ec1cf
RSZ
1795 return PTR_ERR(dmabuf);
1796
1797 /* if this memory came from ion */
1798 if (dmabuf->ops != &dma_buf_ops) {
1799 pr_err("%s: can not sync dmabuf from another exporter\n",
1800 __func__);
1801 dma_buf_put(dmabuf);
1802 return -EINVAL;
1803 }
1804 buffer = dmabuf->priv;
856661d5 1805
1cac41cb
MB
1806 if (!ion_buffer_cached(buffer) ||
1807 ion_buffer_fault_user_mappings(buffer)) {
1808 dma_buf_put(dmabuf);
1809 return 0;
1810 }
1811
1812 trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
1813 DMA_BIDIRECTIONAL, buffer->size,
1814 buffer->vaddr, 0, false);
1815
1816 sgl = buffer->sg_table->sgl;
1817 nelems = buffer->sg_table->nents;
1818
1819 for_each_sg(sgl, sg, nelems, i) {
1820 vaddr = phys_to_virt(sg_phys(sg));
1821 __dma_flush_range(vaddr, vaddr + sg->length);
1822 }
1823
1824 trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
1825 DMA_BIDIRECTIONAL, buffer->size,
1826 buffer->vaddr, 0, false);
1827
0b9ec1cf
RSZ
1828 dma_buf_put(dmabuf);
1829 return 0;
1830}
1831
1cac41cb
MB
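/*
 * Editor's note (worked example): with 4 KiB scatterlist entries, a call
 * with offset = 6 KiB and len = 4 KiB skips the first entry, flushes the
 * last 2 KiB of the second and the first 2 KiB of the third, then stops
 * once 'remained' reaches zero. Unlike the full sync above, this path only
 * cleans to the device (DMA_TO_DEVICE).
 */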
1832static int ion_sync_partial_for_device(struct ion_client *client, int fd,
1833 off_t offset, size_t len)
1834{
1835 struct dma_buf *dmabuf;
1836 struct ion_buffer *buffer;
1837 struct scatterlist *sg, *sgl;
1838 size_t remained = len;
1839 int nelems;
1840 int i;
1841
1842 dmabuf = dma_buf_get(fd);
1843 if (IS_ERR(dmabuf))
1844 return PTR_ERR(dmabuf);
1845
1846 /* if this memory came from ion */
1847 if (dmabuf->ops != &dma_buf_ops) {
1848 pr_err("%s: can not sync dmabuf from another exporter\n",
1849 __func__);
1850 dma_buf_put(dmabuf);
1851 return -EINVAL;
1852 }
1853 buffer = dmabuf->priv;
1854
1855 if (!ion_buffer_cached(buffer) ||
1856 ion_buffer_fault_user_mappings(buffer)) {
1857 dma_buf_put(dmabuf);
1858 return 0;
1859 }
1860
1861 trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
1862 DMA_BIDIRECTIONAL, buffer->size,
1863 buffer->vaddr, 0, false);
1864
1865 sgl = buffer->sg_table->sgl;
1866 nelems = buffer->sg_table->nents;
1867
1868 for_each_sg(sgl, sg, nelems, i) {
1869 size_t len_to_flush;
1870 if (offset >= sg->length) {
1871 offset -= sg->length;
1872 continue;
1873 }
1874
1875 len_to_flush = sg->length - offset;
1876 if (remained < len_to_flush) {
1877 len_to_flush = remained;
1878 remained = 0;
1879 } else {
1880 remained -= len_to_flush;
1881 }
1882
1883 __dma_map_area(phys_to_virt(sg_phys(sg)) + offset,
1884 len_to_flush, DMA_TO_DEVICE);
1885
1886 if (remained == 0)
1887 break;
1888 offset = 0;
1889 }
1890
1891 trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
1892 DMA_BIDIRECTIONAL, buffer->size,
1893 buffer->vaddr, 0, false);
1894
1895 dma_buf_put(dmabuf);
1896
1897 return 0;
1898}
1899
db866e3d
CC
1900/* fix up the cases where the ioctl direction bits are incorrect */
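/*
 * Editor's note: the commands listed below carry a read direction bit in
 * their uapi definitions even though the kernel never copies data back for
 * them, so they are forced to _IOC_WRITE here and ion_ioctl() skips the
 * final copy_to_user() for them.
 */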
1901static unsigned int ion_ioctl_dir(unsigned int cmd)
1902{
1903 switch (cmd) {
1904 case ION_IOC_SYNC:
1cac41cb 1905 case ION_IOC_SYNC_PARTIAL:
db866e3d
CC
1906 case ION_IOC_FREE:
1907 case ION_IOC_CUSTOM:
1908 return _IOC_WRITE;
1909 default:
1910 return _IOC_DIR(cmd);
1911 }
1912}
1913
c30707be
RSZ
1914static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1915{
1916 struct ion_client *client = filp->private_data;
db866e3d
CC
1917 struct ion_device *dev = client->dev;
1918 struct ion_handle *cleanup_handle = NULL;
1919 int ret = 0;
1920 unsigned int dir;
1921
1922 union {
1923 struct ion_fd_data fd;
1cac41cb 1924 struct ion_fd_partial_data fd_partial;
db866e3d
CC
1925 struct ion_allocation_data allocation;
1926 struct ion_handle_data handle;
1927 struct ion_custom_data custom;
1928 } data;
1929
1930 dir = ion_ioctl_dir(cmd);
1931
1932 if (_IOC_SIZE(cmd) > sizeof(data))
1933 return -EINVAL;
1934
1935 if (dir & _IOC_WRITE)
1936 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1937 return -EFAULT;
c30707be
RSZ
1938
1939 switch (cmd) {
1940 case ION_IOC_ALLOC:
1941 {
47b40458 1942 struct ion_handle *handle;
c30707be 1943
1cac41cb 1944 handle = __ion_alloc(client, data.allocation.len,
db866e3d
CC
1945 data.allocation.align,
1946 data.allocation.heap_id_mask,
1cac41cb
MB
1947 data.allocation.flags, true);
1948 if (IS_ERR(handle)) {
1949 pr_err("%s: len %zu align %zu heap_id_mask %u flags %x (ret %ld)\n",
1950 __func__, data.allocation.len,
1951 data.allocation.align,
1952 data.allocation.heap_id_mask,
1953 data.allocation.flags, PTR_ERR(handle));
47b40458 1954 return PTR_ERR(handle);
1cac41cb
MB
1955 }
1956 pass_to_user(handle);
db866e3d 1957 data.allocation.handle = handle->id;
54ac0784 1958
db866e3d 1959 cleanup_handle = handle;
c30707be
RSZ
1960 break;
1961 }
1962 case ION_IOC_FREE:
1963 {
47b40458 1964 struct ion_handle *handle;
c30707be 1965
620ed669
EL
1966 mutex_lock(&client->lock);
1967 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1968 if (IS_ERR(handle)) {
1969 mutex_unlock(&client->lock);
83271f62 1970 return PTR_ERR(handle);
620ed669 1971 }
1cac41cb 1972 user_ion_free_nolock(client, handle);
620ed669
EL
1973 ion_handle_put_nolock(handle);
1974 mutex_unlock(&client->lock);
c30707be
RSZ
1975 break;
1976 }
c30707be 1977 case ION_IOC_SHARE:
df0f6c76 1978 case ION_IOC_MAP:
c30707be 1979 {
47b40458 1980 struct ion_handle *handle;
c30707be 1981
db866e3d 1982 handle = ion_handle_get_by_id(client, data.handle.handle);
83271f62
CC
1983 if (IS_ERR(handle))
1984 return PTR_ERR(handle);
db866e3d 1985 data.fd.fd = ion_share_dma_buf_fd(client, handle);
83271f62 1986 ion_handle_put(handle);
db866e3d
CC
1987 if (data.fd.fd < 0)
1988 ret = data.fd.fd;
c30707be
RSZ
1989 break;
1990 }
1991 case ION_IOC_IMPORT:
1992 {
47b40458 1993 struct ion_handle *handle;
10f62861 1994
db866e3d 1995 handle = ion_import_dma_buf(client, data.fd.fd);
1cac41cb 1996 if (IS_ERR(handle)) {
47b40458 1997 ret = PTR_ERR(handle);
1cac41cb
MB
1998 } else {
1999 handle = pass_to_user(handle);
2000 if (IS_ERR(handle))
2001 ret = PTR_ERR(handle);
2002 else
2003 data.handle.handle = handle->id;
2004 }
c30707be
RSZ
2005 break;
2006 }
0b9ec1cf
RSZ
2007 case ION_IOC_SYNC:
2008 {
db866e3d 2009 ret = ion_sync_for_device(client, data.fd.fd);
0b9ec1cf
RSZ
2010 break;
2011 }
1cac41cb
MB
2012 case ION_IOC_SYNC_PARTIAL:
2013 {
2014 ret = ion_sync_partial_for_device(client, data.fd_partial.fd,
2015 data.fd_partial.offset, data.fd_partial.len);
2016 break;
2017 }
c30707be
RSZ
2018 case ION_IOC_CUSTOM:
2019 {
c30707be
RSZ
2020 if (!dev->custom_ioctl)
2021 return -ENOTTY;
db866e3d
CC
2022 ret = dev->custom_ioctl(client, data.custom.cmd,
2023 data.custom.arg);
2024 break;
c30707be
RSZ
2025 }
2026 default:
2027 return -ENOTTY;
2028 }
db866e3d
CC
2029
2030 if (dir & _IOC_READ) {
2031 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1cac41cb
MB
2032 if (cleanup_handle) {
2033 mutex_lock(&client->lock);
2034 user_ion_free_nolock(client, cleanup_handle);
2035 ion_handle_put_nolock(cleanup_handle);
2036 mutex_unlock(&client->lock);
2037 }
db866e3d
CC
2038 return -EFAULT;
2039 }
2040 }
1cac41cb
MB
2041 if (cleanup_handle)
2042 ion_handle_put(cleanup_handle);
db866e3d 2043 return ret;
c30707be
RSZ
2044}
2045
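/*
 * Editor's illustrative sketch (not part of the original file): the
 * userspace counterpart of the ioctl handler above: allocate, export the
 * allocation as a dma-buf fd, mmap it, then drop the handle. It assumes the
 * matching legacy uapi header is installed as <linux/ion.h>; the heap mask,
 * flags and error handling are placeholders.
 */
#if 0
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

static void *example_alloc_and_mmap(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share = { 0 };
	struct ion_handle_data free_data = { 0 };
	void *ptr = MAP_FAILED;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return NULL;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0) {
		share.handle = alloc.handle;
		if (ioctl(ion_fd, ION_IOC_SHARE, &share) == 0) {
			ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
				   MAP_SHARED, share.fd, 0);
			close(share.fd);
		}
		/* the mapping and the fd keep the buffer alive on their own */
		free_data.handle = alloc.handle;
		ioctl(ion_fd, ION_IOC_FREE, &free_data);
	}

	close(ion_fd);
	return ptr == MAP_FAILED ? NULL : ptr;
}
#endif
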
2046static int ion_release(struct inode *inode, struct file *file)
2047{
2048 struct ion_client *client = file->private_data;
2049
2050 pr_debug("%s: %d\n", __func__, __LINE__);
b892bf75 2051 ion_client_destroy(client);
c30707be
RSZ
2052 return 0;
2053}
2054
2055static int ion_open(struct inode *inode, struct file *file)
2056{
2057 struct miscdevice *miscdev = file->private_data;
2058 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
2059 struct ion_client *client;
483ed03f 2060 char debug_name[64];
c30707be
RSZ
2061
2062 pr_debug("%s: %d\n", __func__, __LINE__);
483ed03f
LA
2063 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
2064 client = ion_client_create(dev, debug_name);
9e907654 2065 if (IS_ERR(client))
c30707be
RSZ
2066 return PTR_ERR(client);
2067 file->private_data = client;
2068
2069 return 0;
2070}
2071
2072static const struct file_operations ion_fops = {
2073 .owner = THIS_MODULE,
2074 .open = ion_open,
2075 .release = ion_release,
2076 .unlocked_ioctl = ion_ioctl,
827c849e 2077 .compat_ioctl = compat_ion_ioctl,
c30707be
RSZ
2078};
2079
2080static size_t ion_debug_heap_total(struct ion_client *client,
2bb9f503 2081 unsigned int id)
c30707be
RSZ
2082{
2083 size_t size = 0;
2084 struct rb_node *n;
2085
2086 mutex_lock(&client->lock);
2087 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
2088 struct ion_handle *handle = rb_entry(n,
2089 struct ion_handle,
2090 node);
2bb9f503 2091 if (handle->buffer->heap->id == id)
c30707be
RSZ
2092 size += handle->buffer->size;
2093 }
2094 mutex_unlock(&client->lock);
2095 return size;
2096}
2097
2098static int ion_debug_heap_show(struct seq_file *s, void *unused)
2099{
2100 struct ion_heap *heap = s->private;
2101 struct ion_device *dev = heap->dev;
2102 struct rb_node *n;
5ad7bc3a
RSZ
2103 size_t total_size = 0;
2104 size_t total_orphaned_size = 0;
c30707be 2105
b5693964 2106 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
164ad86d 2107 seq_puts(s, "----------------------------------------------------\n");
c30707be 2108
1cac41cb
MB
2109 down_read(&dev->lock);
2110
b892bf75 2111 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
c30707be
RSZ
2112 struct ion_client *client = rb_entry(n, struct ion_client,
2113 node);
2bb9f503 2114 size_t size = ion_debug_heap_total(client, heap->id);
10f62861 2115
c30707be
RSZ
2116 if (!size)
2117 continue;
b892bf75
RSZ
2118 if (client->task) {
2119 char task_comm[TASK_COMM_LEN];
2120
2121 get_task_comm(task_comm, client->task);
b5693964 2122 seq_printf(s, "%16s %16u %16zu\n", task_comm,
b892bf75
RSZ
2123 client->pid, size);
2124 } else {
b5693964 2125 seq_printf(s, "%16s %16u %16zu\n", client->name,
b892bf75
RSZ
2126 client->pid, size);
2127 }
c30707be 2128 }
164ad86d
IM
2129 seq_puts(s, "----------------------------------------------------\n");
2130 seq_puts(s, "orphaned allocations (info is from last known client):\n");
8d7ab9a9 2131 mutex_lock(&dev->buffer_lock);
5ad7bc3a
RSZ
2132 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2133 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
2134 node);
2bb9f503 2135 if (buffer->heap->id != heap->id)
45b17a80
RSZ
2136 continue;
2137 total_size += buffer->size;
5ad7bc3a 2138 if (!buffer->handle_count) {
b5693964 2139 seq_printf(s, "%16s %16u %16zu %d %d\n",
e61fc915
CC
2140 buffer->task_comm, buffer->pid,
2141 buffer->size, buffer->kmap_cnt,
092c354b 2142 atomic_read(&buffer->ref.refcount));
5ad7bc3a
RSZ
2143 total_orphaned_size += buffer->size;
2144 }
2145 }
8d7ab9a9 2146 mutex_unlock(&dev->buffer_lock);
164ad86d 2147 seq_puts(s, "----------------------------------------------------\n");
b5693964 2148 seq_printf(s, "%16s %16zu\n", "total orphaned",
5ad7bc3a 2149 total_orphaned_size);
b5693964 2150 seq_printf(s, "%16s %16zu\n", "total ", total_size);
2540c73a 2151 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
b5693964 2152 seq_printf(s, "%16s %16zu\n", "deferred free",
2540c73a 2153 heap->free_list_size);
164ad86d 2154 seq_puts(s, "----------------------------------------------------\n");
45b17a80
RSZ
2155
2156 if (heap->debug_show)
2157 heap->debug_show(heap, s, unused);
5ad7bc3a 2158
1cac41cb
MB
2159 up_read(&dev->lock);
2160
c30707be
RSZ
2161 return 0;
2162}
2163
2164static int ion_debug_heap_open(struct inode *inode, struct file *file)
2165{
2166 return single_open(file, ion_debug_heap_show, inode->i_private);
2167}
2168
2169static const struct file_operations debug_heap_fops = {
2170 .open = ion_debug_heap_open,
2171 .read = seq_read,
2172 .llseek = seq_lseek,
2173 .release = single_release,
2174};
2175
ea313b5f 2176static int debug_shrink_set(void *data, u64 val)
fe2faea7 2177{
e1d855b0
JS
2178 struct ion_heap *heap = data;
2179 struct shrink_control sc;
2180 int objs;
fe2faea7 2181
e1d855b0 2182 sc.gfp_mask = -1;
aeb7fa7b 2183 sc.nr_to_scan = val;
fe2faea7 2184
aeb7fa7b
GK
2185 if (!val) {
2186 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
2187 sc.nr_to_scan = objs;
2188 }
fe2faea7 2189
aeb7fa7b 2190 heap->shrinker.scan_objects(&heap->shrinker, &sc);
e1d855b0 2191 return 0;
fe2faea7
RSZ
2192}
2193
ea313b5f 2194static int debug_shrink_get(void *data, u64 *val)
fe2faea7 2195{
e1d855b0
JS
2196 struct ion_heap *heap = data;
2197 struct shrink_control sc;
2198 int objs;
fe2faea7 2199
e1d855b0
JS
2200 sc.gfp_mask = -1;
2201 sc.nr_to_scan = 0;
fe2faea7 2202
aeb7fa7b 2203 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
e1d855b0
JS
2204 *val = objs;
2205 return 0;
fe2faea7
RSZ
2206}
2207
ea313b5f 2208DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
e1d855b0 2209 debug_shrink_set, "%llu\n");
ea313b5f 2210
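/*
 * Editor's note: the <heap name>_shrink debugfs file wraps the heap
 * shrinker. Reading it reports the current object count; writing N scans up
 * to N objects, and writing 0 scans everything currently counted.
 */
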
c30707be
RSZ
2211void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
2212{
b08585fb
MH
2213 struct dentry *debug_file;
2214
29ae6bc7
RSZ
2215 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
2216 !heap->ops->unmap_dma)
2217 pr_err("%s: can not add heap with invalid ops struct.\n",
2218 __func__);
2219
95e53ddd
MH
2220 spin_lock_init(&heap->free_lock);
2221 heap->free_list_size = 0;
2222
ea313b5f
RSZ
2223 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
2224 ion_heap_init_deferred_free(heap);
fe2faea7 2225
b9daf0b6
CC
2226 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
2227 ion_heap_init_shrinker(heap);
2228
c30707be 2229 heap->dev = dev;
8d7ab9a9 2230 down_write(&dev->lock);
7e416174
SR
2231 /*
 2232 * use negative heap->id to reverse the priority: when traversing
 2233 * the list later, attempt higher id numbers first
2234 */
cd69488c
RSZ
2235 plist_node_init(&heap->node, -heap->id);
2236 plist_add(&heap->node, &dev->heaps);
b08585fb
MH
2237 debug_file = debugfs_create_file(heap->name, 0664,
2238 dev->heaps_debug_root, heap,
2239 &debug_heap_fops);
2240
2241 if (!debug_file) {
2242 char buf[256], *path;
10f62861 2243
b08585fb
MH
2244 path = dentry_path(dev->heaps_debug_root, buf, 256);
2245 pr_err("Failed to create heap debugfs at %s/%s\n",
2246 path, heap->name);
2247 }
2248
aeb7fa7b 2249 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
ea313b5f
RSZ
2250 char debug_name[64];
2251
2252 snprintf(debug_name, 64, "%s_shrink", heap->name);
b08585fb
MH
2253 debug_file = debugfs_create_file(
2254 debug_name, 0644, dev->heaps_debug_root, heap,
2255 &debug_shrink_fops);
2256 if (!debug_file) {
2257 char buf[256], *path;
10f62861 2258
b08585fb
MH
2259 path = dentry_path(dev->heaps_debug_root, buf, 256);
2260 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
2261 path, debug_name);
2262 }
ea313b5f 2263 }
aeb7fa7b 2264
8d7ab9a9 2265 up_write(&dev->lock);
c30707be 2266}
8c6c463e 2267EXPORT_SYMBOL(ion_device_add_heap);
c30707be 2268
1cac41cb
MB
2269#ifdef CONFIG_ION_EXYNOS_STAT_LOG
2270
2271#define MAX_DUMP_TASKS 8
2272#define MAX_DUMP_NAME_LEN 32
2273#define MAX_DUMP_BUFF_LEN 512
2274
2275static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
2276{
2277 if ((flags & ION_FLAG_CACHED) && !(flags & ION_FLAG_CACHED_NEEDS_SYNC))
2278 seq_printf(s, "cached|faultmap");
2279 else if (flags & ION_FLAG_CACHED)
2280 seq_printf(s, "cached|needsync");
2281 else
2282 seq_printf(s, "noncached");
2283
2284 if (flags & ION_FLAG_NOZEROED)
2285 seq_printf(s, "|nozeroed");
2286
2287 if (flags & ION_FLAG_PROTECTED)
2288 seq_printf(s, "|protected");
2289}
2290
2291static void ion_buffer_dump_tasks(struct ion_buffer *buffer, char *str)
2292{
2293 struct ion_task *task, *tmp;
2294 const char *delim = "|";
2295 size_t total_len = 0;
2296 int count = 0;
2297
2298 list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
2299 const char *name;
2300 size_t len = strlen(dev_name(task->master));
2301
2302 if (len > MAX_DUMP_NAME_LEN)
2303 len = MAX_DUMP_NAME_LEN;
2304 if (!strncmp(dev_name(task->master), "ion", len)) {
2305 continue;
2306 } else {
2307 name = dev_name(task->master) + 9;
2308 len -= 9;
2309 }
2310 if (total_len + len + 1 > MAX_DUMP_BUFF_LEN)
2311 break;
2312
2313 strncat((char *)(str + total_len), name, len);
2314 total_len += len;
2315 if (!list_is_last(&task->list, &buffer->master_list))
2316 str[total_len++] = *delim;
2317
2318 if (++count > MAX_DUMP_TASKS)
2319 break;
2320 }
2321}
2322
2323static int ion_debug_buffer_show(struct seq_file *s, void *unused)
2324{
2325 struct ion_device *dev = s->private;
2326 struct rb_node *n;
2327 char *master_name;
2328 size_t total_size = 0;
2329
2330 master_name = kzalloc(MAX_DUMP_BUFF_LEN, GFP_KERNEL);
2331 if (!master_name) {
2332 pr_err("%s: no memory for client string buffer\n", __func__);
2333 return -ENOMEM;
2334 }
2335
 2336 seq_printf(s, "%20s %16s %4s %16s %4s %10s %4s %3s %6s "
 2337 "%24s %9s\n",
2338 "heap", "task", "pid", "thread", "tid",
2339 "size", "kmap", "ref", "handle",
2340 "master", "flag");
2341 seq_printf(s, "------------------------------------------"
2342 "----------------------------------------"
2343 "----------------------------------------"
2344 "--------------------------------------\n");
2345
2346 mutex_lock(&dev->buffer_lock);
2347 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
2348 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
2349 node);
2350 mutex_lock(&buffer->lock);
2351 ion_buffer_dump_tasks(buffer, master_name);
2352 total_size += buffer->size;
 2353 seq_printf(s, "%20s %16s %4u %16s %4u %10zu %4d %3d %6d "
 2354 "%24s %9lx", buffer->heap->name,
2355 buffer->task_comm, buffer->pid,
2356 buffer->thread_comm,
2357 buffer->tid, buffer->size, buffer->kmap_cnt,
2358 atomic_read(&buffer->ref.refcount),
2359 buffer->handle_count, master_name,
2360 buffer->flags);
2361 seq_printf(s, "(");
2362 ion_buffer_dump_flags(s, buffer->flags);
2363 seq_printf(s, ")\n");
2364 mutex_unlock(&buffer->lock);
2365
2366 memset(master_name, 0, MAX_DUMP_BUFF_LEN);
2367 }
2368 mutex_unlock(&dev->buffer_lock);
2369
2370 seq_printf(s, "------------------------------------------"
2371 "----------------------------------------"
2372 "----------------------------------------"
2373 "--------------------------------------\n");
 2374 seq_printf(s, "%16s %16zu\n", "total ", total_size);
2375 seq_printf(s, "------------------------------------------"
2376 "----------------------------------------"
2377 "----------------------------------------"
2378 "--------------------------------------\n");
2379
2380 kfree(master_name);
2381
2382 return 0;
2383}
2384
2385static int ion_debug_buffer_open(struct inode *inode, struct file *file)
2386{
2387 return single_open(file, ion_debug_buffer_show, inode->i_private);
2388}
2389
2390static const struct file_operations debug_buffer_fops = {
2391 .open = ion_debug_buffer_open,
2392 .read = seq_read,
2393 .llseek = seq_lseek,
2394 .release = single_release,
2395};
2396
2397static void ion_debug_event_show_one(struct seq_file *s,
2398 struct ion_eventlog *log)
2399{
2400 struct timeval tv = ktime_to_timeval(log->begin);
2401 long elapsed = ktime_us_delta(log->done, log->begin);
2402
2403 if (elapsed == 0)
2404 return;
2405
2406 seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);
2407
2408 switch (log->type) {
2409 case ION_EVENT_TYPE_ALLOC:
2410 {
2411 struct ion_event_alloc *data = &log->data.alloc;
2412 seq_printf(s, "%8s %pK %18s %11zd ", "alloc",
2413 data->id, data->heap->name, data->size);
2414 break;
2415 }
2416 case ION_EVENT_TYPE_FREE:
2417 {
2418 struct ion_event_free *data = &log->data.free;
2419 seq_printf(s, "%8s %pK %18s %11zd ", "free",
2420 data->id, data->heap->name, data->size);
2421 break;
2422 }
2423 case ION_EVENT_TYPE_MMAP:
2424 {
2425 struct ion_event_mmap *data = &log->data.mmap;
2426 seq_printf(s, "%8s %pK %18s %11zd ", "mmap",
2427 data->id, data->heap->name, data->size);
2428 break;
2429 }
2430 case ION_EVENT_TYPE_SHRINK:
2431 {
2432 struct ion_event_shrink *data = &log->data.shrink;
2433 seq_printf(s, "%8s %16lx %18s %11zd ", "shrink",
2434 0l, "ion_noncontig_heap", data->size);
2435 elapsed = 0;
2436 break;
2437 }
2438 case ION_EVENT_TYPE_CLEAR:
2439 {
2440 struct ion_event_clear *data = &log->data.clear;
2441 seq_printf(s, "%8s %pK %18s %11zd ", "clear",
2442 data->id, data->heap->name, data->size);
2443 break;
2444 }
2445 }
2446
2447 seq_printf(s, "%9ld", elapsed);
2448
2449 if (elapsed > 100 * USEC_PER_MSEC)
2450 seq_printf(s, " *");
2451
2452 if (log->type == ION_EVENT_TYPE_ALLOC) {
2453 seq_printf(s, " ");
2454 ion_buffer_dump_flags(s, log->data.alloc.flags);
2455 } else if (log->type == ION_EVENT_TYPE_CLEAR) {
2456 seq_printf(s, " ");
2457 ion_buffer_dump_flags(s, log->data.clear.flags);
2458 }
2459
2460 if (log->type == ION_EVENT_TYPE_FREE && log->data.free.shrinker)
2461 seq_printf(s, " shrinker");
2462
2463 seq_printf(s, "\n");
2464}
2465
2466static int ion_debug_event_show(struct seq_file *s, void *unused)
2467{
2468 struct ion_device *dev = s->private;
 2469 int index = atomic_read(&dev->event_idx) % ION_EVENT_LOG_MAX;
 2470 int last = index;
 2471
 /* the log may still be empty: event_idx starts at -1 and only grows */
 if (index < 0)
 index = last = ION_EVENT_LOG_MAX - 1;

2472 seq_printf(s, "%13s %10s %8s %18s %11s %10s %24s\n", "timestamp",
2473 "type", "id", "heap", "size", "time (us)", "remarks");
2474 seq_printf(s, "-------------------------------------------");
2475 seq_printf(s, "-------------------------------------------");
2476 seq_printf(s, "-----------------------------------------\n");
2477
2478 do {
2479 if (++index >= ION_EVENT_LOG_MAX)
2480 index = 0;
2481 ion_debug_event_show_one(s, &dev->eventlog[index]);
2482 } while (index != last);
2483
2484 return 0;
2485}
2486
2487static int ion_debug_event_open(struct inode *inode, struct file *file)
2488{
2489 return single_open(file, ion_debug_event_show, inode->i_private);
2490}
2491
2492static const struct file_operations debug_event_fops = {
2493 .open = ion_debug_event_open,
2494 .read = seq_read,
2495 .llseek = seq_lseek,
2496 .release = single_release,
2497};
2498#endif
2499
c30707be
RSZ
2500struct ion_device *ion_device_create(long (*custom_ioctl)
2501 (struct ion_client *client,
2502 unsigned int cmd,
2503 unsigned long arg))
2504{
2505 struct ion_device *idev;
2506 int ret;
2507
2508 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
2509 if (!idev)
2510 return ERR_PTR(-ENOMEM);
2511
2512 idev->dev.minor = MISC_DYNAMIC_MINOR;
2513 idev->dev.name = "ion";
2514 idev->dev.fops = &ion_fops;
2515 idev->dev.parent = NULL;
2516 ret = misc_register(&idev->dev);
2517 if (ret) {
2518 pr_err("ion: failed to register misc device.\n");
283d9304 2519 kfree(idev);
c30707be
RSZ
2520 return ERR_PTR(ret);
2521 }
2522
2523 idev->debug_root = debugfs_create_dir("ion", NULL);
b08585fb
MH
2524 if (!idev->debug_root) {
2525 pr_err("ion: failed to create debugfs root directory.\n");
2526 goto debugfs_done;
2527 }
2528 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
2529 if (!idev->heaps_debug_root) {
2530 pr_err("ion: failed to create debugfs heaps directory.\n");
2531 goto debugfs_done;
2532 }
2533 idev->clients_debug_root = debugfs_create_dir("clients",
2534 idev->debug_root);
1cac41cb 2535 if (!idev->clients_debug_root) {
b08585fb 2536 pr_err("ion: failed to create debugfs clients directory.\n");
1cac41cb
MB
2537 goto debugfs_done;
2538 }
2539
2540#ifdef CONFIG_ION_EXYNOS_STAT_LOG
2541 atomic_set(&idev->event_idx, -1);
2542 idev->buffer_debug_file = debugfs_create_file("buffer", 0444,
2543 idev->debug_root, idev,
2544 &debug_buffer_fops);
2545 if (!idev->buffer_debug_file) {
2546 pr_err("%s: failed to create buffer debug file\n", __func__);
2547 goto debugfs_done;
2548 }
2549
2550 idev->event_debug_file = debugfs_create_file("event", 0444,
2551 idev->debug_root, idev,
2552 &debug_event_fops);
2553 if (!idev->event_debug_file)
2554 pr_err("%s: failed to create event debug file\n", __func__);
2555#endif
b08585fb
MH
2556
2557debugfs_done:
c30707be
RSZ
2558
2559 idev->custom_ioctl = custom_ioctl;
2560 idev->buffers = RB_ROOT;
8d7ab9a9
RSZ
2561 mutex_init(&idev->buffer_lock);
2562 init_rwsem(&idev->lock);
cd69488c 2563 plist_head_init(&idev->heaps);
b892bf75 2564 idev->clients = RB_ROOT;
1cac41cb
MB
2565
2566 /* backup of ion device: assumes there is only one ion device */
2567 g_idev = idev;
2568
c30707be
RSZ
2569 return idev;
2570}
8c6c463e 2571EXPORT_SYMBOL(ion_device_create);
c30707be
RSZ
2572
2573void ion_device_destroy(struct ion_device *dev)
2574{
2575 misc_deregister(&dev->dev);
b08585fb 2576 debugfs_remove_recursive(dev->debug_root);
c30707be
RSZ
2577 kfree(dev);
2578}
8c6c463e 2579EXPORT_SYMBOL(ion_device_destroy);
2991b7a0
RSZ
2580
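/*
 * Editor's note: meant to run from early (__init) platform code before the
 * heaps exist. Entries with a fixed base are reserved in memblock as-is;
 * entries with base == 0 get memory allocated by memblock and the chosen
 * base is written back into the platform data for the heap created later.
 */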
2581void __init ion_reserve(struct ion_platform_data *data)
2582{
fa9bba55 2583 int i;
2991b7a0
RSZ
2584
2585 for (i = 0; i < data->nr; i++) {
2586 if (data->heaps[i].size == 0)
2587 continue;
fa9bba55
RSZ
2588
2589 if (data->heaps[i].base == 0) {
2590 phys_addr_t paddr;
10f62861 2591
fa9bba55
RSZ
2592 paddr = memblock_alloc_base(data->heaps[i].size,
2593 data->heaps[i].align,
2594 MEMBLOCK_ALLOC_ANYWHERE);
2595 if (!paddr) {
51108985 2596 pr_err("%s: error allocating memblock for heap %d\n",
fa9bba55
RSZ
2597 __func__, i);
2598 continue;
2599 }
2600 data->heaps[i].base = paddr;
2601 } else {
2602 int ret = memblock_reserve(data->heaps[i].base,
2603 data->heaps[i].size);
2604 if (ret)
e61fc915 2605 pr_err("memblock reserve of %zx@%lx failed\n",
fa9bba55
RSZ
2606 data->heaps[i].size,
2607 data->heaps[i].base);
2608 }
e61fc915 2609 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
fa9bba55
RSZ
2610 data->heaps[i].name,
2611 data->heaps[i].base,
2612 data->heaps[i].size);
2991b7a0
RSZ
2613 }
2614}
1cac41cb
MB
2615
2616static struct ion_iovm_map *ion_buffer_iova_create(struct ion_buffer *buffer,
2617 struct device *dev, enum dma_data_direction dir, int prop)
2618{
 2619 /* Must be called with buffer->lock held */
2620 struct ion_iovm_map *iovm_map;
2621 int ret = 0;
2622
2623 iovm_map = kzalloc(sizeof(struct ion_iovm_map), GFP_KERNEL);
2624 if (!iovm_map) {
2625 pr_err("%s: Failed to allocate ion_iovm_map for %s\n",
2626 __func__, dev_name(dev));
2627 return ERR_PTR(-ENOMEM);
2628 }
2629
2630 iovm_map->iova = iovmm_map(dev, buffer->sg_table->sgl,
2631 0, buffer->size, dir, prop);
2632
2633 if (iovm_map->iova == (dma_addr_t)-ENOSYS) {
2634 size_t len;
2635 ion_phys_addr_t addr;
2636
2637 BUG_ON(!buffer->heap->ops->phys);
2638 ret = buffer->heap->ops->phys(buffer->heap, buffer,
2639 &addr, &len);
 2640 if (ret)
 2641 pr_err("%s: Unable to get PA for %s\n",
 2642 __func__, dev_name(dev));
 else /* no IOMMU for this master: fall back to the physical address */
 iovm_map->iova = (dma_addr_t)addr;
2643 } else if (IS_ERR_VALUE(iovm_map->iova)) {
2644 ret = iovm_map->iova;
2645 pr_err("%s: Unable to allocate IOVA for %s\n",
2646 __func__, dev_name(dev));
2647 }
2648
2649 if (ret) {
2650 kfree(iovm_map);
2651 return ERR_PTR(ret);
2652 }
2653
2654 iovm_map->dev = dev;
2655 iovm_map->domain = get_domain_from_dev(dev);
2656 iovm_map->map_cnt = 1;
2657
2658 pr_debug("%s: new map added for dev %s, iova %pa, prop %d\n", __func__,
2659 dev_name(dev), &iovm_map->iova, prop);
2660
2661 return iovm_map;
2662}
2663
2664dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
2665 off_t offset, size_t size,
2666 enum dma_data_direction direction, int prop)
2667{
2668 struct dma_buf *dmabuf = attachment->dmabuf;
2669 struct ion_buffer *buffer = dmabuf->priv;
2670 struct ion_iovm_map *iovm_map;
2671 struct iommu_domain *domain;
2672
2673 BUG_ON(dmabuf->ops != &dma_buf_ops);
2674
2675 if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
2676 buffer->flags & ION_FLAG_PROTECTED) {
2677 struct ion_buffer_info *info = buffer->priv_virt;
2678
2679 if (info->prot_desc.dma_addr)
2680 return info->prot_desc.dma_addr;
2681 pr_err("%s: protected buffer but no secure iova\n", __func__);
2682 return -EINVAL;
2683 }
2684
2685 domain = get_domain_from_dev(attachment->dev);
2686 if (!domain) {
2687 pr_err("%s: invalid iommu device\n", __func__);
2688 return -EINVAL;
2689 }
2690
2691 mutex_lock(&buffer->lock);
2692 list_for_each_entry(iovm_map, &buffer->iovas, list) {
2693 if (domain == iovm_map->domain) {
2694 iovm_map->map_cnt++;
2695 mutex_unlock(&buffer->lock);
2696 return iovm_map->iova;
2697 }
2698 }
2699
2700 if (!ion_buffer_cached(buffer))
2701 prop &= ~IOMMU_CACHE;
2702
2703 iovm_map = ion_buffer_iova_create(buffer, attachment->dev,
2704 direction, prop);
2705 if (IS_ERR(iovm_map)) {
2706 mutex_unlock(&buffer->lock);
2707 return PTR_ERR(iovm_map);
2708 }
2709
2710 list_add_tail(&iovm_map->list, &buffer->iovas);
2711 mutex_unlock(&buffer->lock);
2712
2713 return iovm_map->iova;
2714}
2715
2716void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova)
2717{
2718 struct ion_iovm_map *iovm_map;
 2719 struct dma_buf *dmabuf = attachment->dmabuf;
2720 struct device *dev = attachment->dev;
2721 struct ion_buffer *buffer = attachment->dmabuf->priv;
2722 struct iommu_domain *domain;
2723
2724 BUG_ON(dmabuf->ops != &dma_buf_ops);
2725
2726 if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
2727 buffer->flags & ION_FLAG_PROTECTED)
2728 return;
2729
2730 domain = get_domain_from_dev(attachment->dev);
2731 if (!domain) {
2732 pr_err("%s: invalid iommu device\n", __func__);
2733 return;
2734 }
2735
2736 mutex_lock(&buffer->lock);
2737 list_for_each_entry(iovm_map, &buffer->iovas, list) {
2738 if ((domain == iovm_map->domain) && (iova == iovm_map->iova)) {
2739 if (--iovm_map->map_cnt == 0) {
2740 list_del(&iovm_map->list);
2741 pr_debug("%s: unmap previous %pa for dev %s\n",
2742 __func__, &iovm_map->iova,
2743 dev_name(iovm_map->dev));
2744 iovmm_unmap(iovm_map->dev, iovm_map->iova);
2745 kfree(iovm_map);
2746 }
2747
2748 mutex_unlock(&buffer->lock);
2749 return;
2750 }
2751 }
2752
2753 mutex_unlock(&buffer->lock);
2754
2755 WARN(1, "IOVA %pa is not found for %s\n", &iova, dev_name(dev));
2756}
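
/*
 * Editor's illustrative sketch (not part of the original file): how a device
 * driver on this platform would obtain and release a device virtual address
 * for an imported dma-buf via the two helpers above. All names are
 * placeholders; IOMMU_READ/IOMMU_WRITE are assumed to be the usual prot bits
 * passed through as 'prop'.
 */
#if 0
static dma_addr_t example_map_dmabuf(struct device *dev,
				     struct dma_buf *dmabuf,
				     struct dma_buf_attachment **out_att)
{
	struct dma_buf_attachment *att;
	dma_addr_t iova;

	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att))
		return 0;

	iova = ion_iovmm_map(att, 0, dmabuf->size, DMA_BIDIRECTIONAL,
			     IOMMU_READ | IOMMU_WRITE);
	if (IS_ERR_VALUE(iova)) {
		dma_buf_detach(dmabuf, att);
		return 0;
	}

	*out_att = att;
	return iova;
}

static void example_unmap_dmabuf(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *att,
				 dma_addr_t iova)
{
	ion_iovmm_unmap(att, iova);
	dma_buf_detach(dmabuf, att);
}
#endif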