android: ion: add kernel log on errors
authorCho KyongHo <pullip.cho@samsung.com>
Thu, 22 Feb 2018 11:55:24 +0000 (20:55 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:24 +0000 (19:45 +0900)
It is very important to know the reason for an exception. With only
an error number returned by ION, it is too difficult to learn the
details of the exception.

Change-Id: I51f92bb1cc1c1eee2e0a1a17803d4f5bb0720433
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/ion-ioctl.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_buffer_protect.c
drivers/staging/android/ion/ion_carveout_heap.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/android/ion/ion_system_heap.c

index 021a956db1a894656ab4b11e9aaa73cf6cb7a489..52063f91df0572a98510da5cc7cbe79aa555d829 100644 (file)
 
 #include "ion.h"
 
+/*
+ * ION_IOC_FREE and ion_handle_data are deprecated from ION after 4.14.
+ * However, libion in Android still uses them to determine the ION version.
+ * Therefore, ion_ioctl() should not complain if a user sends ION_IOC_FREE.
+ */
+struct ion_handle_data {
+       int handle;
+};
+
+#define ION_IOC_FREE   _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
 union ion_ioctl_arg {
        struct ion_allocation_data allocation;
        struct ion_heap_query query;
@@ -39,7 +50,13 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
                break;
        }
 
-       return ret ? -EINVAL : 0;
+       if (ret) {
+               pr_err("%s: reserved fields of query_data should be 0\n",
+                      __func__);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 /* fix up the cases where the ioctl direction bits are incorrect */
@@ -59,8 +76,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        dir = ion_ioctl_dir(cmd);
 
-       if (_IOC_SIZE(cmd) > sizeof(data))
+       if (_IOC_SIZE(cmd) > sizeof(data)) {
+               pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
                return -EINVAL;
+       }
 
        /*
         * The copy_from_user is unconditional here for both read and write
@@ -98,6 +117,8 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                ret = ion_query_heaps(&data.query);
                break;
        default:
+               if (cmd != ION_IOC_FREE)
+                       pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
                return -ENOTTY;
        }
 
index 6f71a00715fa96d3789093d748053c9c975597d5..b726485f5cc3e7b3b56db703b5235cefacb6aad0 100644 (file)
@@ -127,6 +127,8 @@ err1:
        heap->ops->free(buffer);
 err2:
        kfree(buffer);
+       pr_err("%s: failed to alloc (len %zu, flag %#lx) buffer from %s heap\n",
+              __func__, len, flags, heap->name);
        return ERR_PTR(ret);
 }
 
@@ -166,8 +168,11 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
        if (WARN_ONCE(!vaddr,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
-       if (IS_ERR(vaddr))
+       if (IS_ERR(vaddr)) {
+               pr_err("%s: failed to alloc kernel address of %zu buffer\n",
+                      __func__, buffer->size);
                return vaddr;
+       }
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
@@ -440,8 +445,11 @@ struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
         */
        len = PAGE_ALIGN(len);
 
-       if (!len)
+       if (!len) {
+               pr_err("%s: zero size allocation - heapmask %#x, flags %#x\n",
+                      __func__, heap_id_mask, flags);
                return ERR_PTR(-EINVAL);
+       }
 
        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
@@ -454,8 +462,11 @@ struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
        }
        up_read(&dev->lock);
 
-       if (!buffer)
+       if (!buffer) {
+               pr_err("%s: no matching heap found against heapmask %#x\n",
+                      __func__, heap_id_mask);
                return ERR_PTR(-ENODEV);
+       }
 
        if (IS_ERR(buffer))
                return ERR_CAST(buffer);
@@ -466,8 +477,11 @@ struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
        exp_info.priv = buffer;
 
        dmabuf = dma_buf_export(&exp_info);
-       if (IS_ERR(dmabuf))
+       if (IS_ERR(dmabuf)) {
+               pr_err("%s: failed to export dmabuf (err %ld)\n", __func__,
+                      -PTR_ERR(dmabuf));
                _ion_buffer_destroy(buffer);
+       }
 
        return dmabuf;
 }
@@ -481,8 +495,10 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
                return PTR_ERR(dmabuf);
 
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
-       if (fd < 0)
+       if (fd < 0) {
+               pr_err("%s: failed to get dmabuf fd (err %d)\n", __func__, -fd);
                dma_buf_put(dmabuf);
+       }
 
        return fd;
 }
@@ -502,8 +518,10 @@ int ion_query_heaps(struct ion_heap_query *query)
                goto out;
        }
 
-       if (query->cnt <= 0)
+       if (query->cnt <= 0) {
+               pr_err("%s: invalid heapdata count %u\n", __func__, query->cnt);
                goto out;
+       }
 
        max_cnt = query->cnt;
 
index 30bce3f10a3ac31e22d0d50f79d821def532eb5d..5b6363765177135688f6dc09aec0dd8c317449e7 100644 (file)
@@ -180,6 +180,8 @@ void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
 
        ret = ion_secure_protect(protdesc, protalign);
        if (ret) {
+               pr_err("%s: protection failure (id%u,len%u,base%#lx,align%#x)\n",
+                      __func__, protection_id, size, phys, protalign);
                kfree(protdesc);
                return ERR_PTR(ret);
        }
index db4f716e108342391d3ebf9af0472a20e6b3a89f..8182f037ceb242a561403bbe46c79f00755d59cc 100644 (file)
@@ -83,11 +83,16 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
        if (!table)
                return -ENOMEM;
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret)
+       if (ret) {
+               pr_err("%s: failed to allocate scatterlist (err %d)\n",
+                      __func__, ret);
                goto err_free;
+       }
 
        paddr = ion_carveout_allocate(carveout_heap, aligned_size);
        if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+               pr_err("%s: failed to allocate from %s(id %d), size %lu\n",
+                      __func__, heap->name, heap->id, size);
                ret = -ENOMEM;
                goto err_free_table;
        }
@@ -144,8 +149,10 @@ static int carveout_heap_map_user(struct ion_heap *heap,
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
 
-       if (carveout_heap->untouchable)
+       if (carveout_heap->untouchable) {
+               pr_err("%s: mmap of %s heap unallowed\n", __func__, heap->name);
                return -EACCES;
+       }
 
        return ion_heap_map_user(heap, buffer, vma);
 }
@@ -156,8 +163,10 @@ static void *carveout_heap_map_kernel(struct ion_heap *heap,
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
 
-       if (carveout_heap->untouchable)
+       if (carveout_heap->untouchable) {
+               pr_err("%s: mapping %s heap unallowed\n", __func__, heap->name);
                return ERR_PTR(-EACCES);
+       }
 
        return ion_heap_map_kernel(heap, buffer);
 }
index ae246c6ec680410ae4aea0e32d255db488556316..82361db6fe669603adf45a7ff62edb992ab70c07 100644 (file)
@@ -84,8 +84,10 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                goto err;
 
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret)
+       if (ret) {
+               pr_err("%s: failed to alloc sgtable(err %d)\n", __func__, -ret);
                goto free_mem;
+       }
 
        sg_set_page(table->sgl, pages, size, 0);
 
index 91faa7f035b93c1befecff04c29dfc16827f59b5..1f73ad7bef1a57b829bead7c75f7a5e069d8864b 100644 (file)
@@ -56,8 +56,10 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);
 
-       if (!vaddr)
+       if (!vaddr) {
+               pr_err("%s: failed vmap %d pages\n", __func__, npages);
                return ERR_PTR(-ENOMEM);
+       }
 
        return vaddr;
 }
index 98df137319c0c0c575b0a9c0eb040fcb78b8a6dc..52d9d53bf85c3d7c49dce1efa0552c18fc94c8cb 100644 (file)
@@ -36,8 +36,12 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool, bool nozero)
                gfpmask &= ~__GFP_ZERO;
 
        page = alloc_pages(gfpmask, pool->order);
-       if (!page)
+       if (!page) {
+               if (pool->order == 0)
+                       pr_err("%s: failed to alloc order-0 page (gfp %pGg)\n",
+                              __func__, &gfpmask);
                return NULL;
+       }
        return page;
 }
 
index b5c3bce9feb361443aae958918c1ef8890d18487..7caad6ee5fc6d6c8d143bd62fe18c1be928f3ec5 100644 (file)
@@ -140,8 +140,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
        unsigned long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];
 
-       if (size / PAGE_SIZE > totalram_pages / 2)
+       if (size / PAGE_SIZE > totalram_pages / 2) {
+               pr_err("%s: too large allocation, %zu bytes\n", __func__, size);
                return -ENOMEM;
+       }
 
        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
@@ -158,8 +160,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
        if (!table)
                goto free_pages;
 
-       if (sg_alloc_table(table, i, GFP_KERNEL))
+       if (sg_alloc_table(table, i, GFP_KERNEL)) {
+               pr_err("%s: failed to alloc sgtable of %d nent\n", __func__, i);
                goto free_table;
+       }
 
        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {