#include "ion.h"
+/*
+ * ION_IOC_FREE and ion_handle_data are deprecated in ION after 4.14,
+ * but libion in Android still issues ION_IOC_FREE to probe the ION
+ * version. Therefore, ion_ioctl() should not complain when a user
+ * sends ION_IOC_FREE.
+ */
+struct ion_handle_data {
+ int handle;
+};
+
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
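For context, a minimal userspace sketch of the probe described in the comment above, assuming the ION uapi header that provides ION_IOC_FREE and struct ion_handle_data; the helper name ion_is_legacy_sketch() is illustrative, not libion's actual symbol:

#include <errno.h>
#include <sys/ioctl.h>

static int ion_is_legacy_sketch(int ion_fd)
{
	struct ion_handle_data data = { .handle = 0 };

	/*
	 * Drivers that removed ION_IOC_FREE fail the ioctl with ENOTTY;
	 * with this patch they still do, just without logging an error.
	 */
	if (ioctl(ion_fd, ION_IOC_FREE, &data) < 0 && errno == ENOTTY)
		return 0;	/* modern interface */

	return 1;		/* legacy interface */
}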
union ion_ioctl_arg {
struct ion_allocation_data allocation;
struct ion_heap_query query;
break;
}
- return ret ? -EINVAL : 0;
+ if (ret) {
+ pr_err("%s: reserved fields of query_data should be 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
dir = ion_ioctl_dir(cmd);
- if (_IOC_SIZE(cmd) > sizeof(data))
+ if (_IOC_SIZE(cmd) > sizeof(data)) {
+ pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
return -EINVAL;
+ }
/*
* The copy_from_user is unconditional here for both read and write
ret = ion_query_heaps(&data.query);
break;
default:
+ if (cmd != ION_IOC_FREE)
+ pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
return -ENOTTY;
}
heap->ops->free(buffer);
err2:
kfree(buffer);
+ pr_err("%s: failed to alloc (len %zu, flag %#lx) buffer from %s heap\n",
+ __func__, len, flags, heap->name);
return ERR_PTR(ret);
}
if (WARN_ONCE(!vaddr,
"heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
- if (IS_ERR(vaddr))
+ if (IS_ERR(vaddr)) {
+ pr_err("%s: failed to alloc kernel address of %zu buffer\n",
+ __func__, buffer->size);
return vaddr;
+ }
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
return vaddr;
*/
len = PAGE_ALIGN(len);
- if (!len)
+ if (!len) {
+ pr_err("%s: zero size allocation - heapmask %#x, flags %#x\n",
+ __func__, heap_id_mask, flags);
return ERR_PTR(-EINVAL);
+ }
down_read(&dev->lock);
plist_for_each_entry(heap, &dev->heaps, node) {
}
up_read(&dev->lock);
- if (!buffer)
+ if (!buffer) {
+ pr_err("%s: no matching heap found against heapmaks %#x\n",
+ __func__, heap_id_mask);
return ERR_PTR(-ENODEV);
+ }
if (IS_ERR(buffer))
return ERR_CAST(buffer);
exp_info.priv = buffer;
dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(dmabuf))
+ if (IS_ERR(dmabuf)) {
+ pr_err("%s: failed to export dmabuf (err %ld)\n", __func__,
+ -PTR_ERR(dmabuf));
_ion_buffer_destroy(buffer);
+ }
return dmabuf;
}
return PTR_ERR(dmabuf);
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
- if (fd < 0)
+ if (fd < 0) {
+ pr_err("%s: failed to get dmabuf fd (err %d)\n", __func__, -fd);
dma_buf_put(dmabuf);
+ }
return fd;
}
goto out;
}
- if (query->cnt <= 0)
+ if (query->cnt <= 0) {
+ pr_err("%s: invalid heapdata count %u\n", __func__, query->cnt);
goto out;
+ }
max_cnt = query->cnt;
ret = ion_secure_protect(protdesc, protalign);
if (ret) {
+ pr_err("%s: protection failure (id%u,len%u,base%#lx,align%#x\n",
+ __func__, protection_id, size, phys, protalign);
kfree(protdesc);
return ERR_PTR(ret);
}
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
+ if (ret) {
+ pr_err("%s: failed to allocate scatterlist (err %d)\n",
+ __func__, ret);
goto err_free;
+ }
paddr = ion_carveout_allocate(carveout_heap, aligned_size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ pr_err("%s: failed to allocate from %s(id %d), size %lu\n",
+ __func__, heap->name, heap->id, size);
ret = -ENOMEM;
goto err_free_table;
}
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
- if (carveout_heap->untouchable)
+ if (carveout_heap->untouchable) {
+ pr_err("%s: mmap of %s heap unallowed\n", __func__, heap->name);
return -EACCES;
+ }
return ion_heap_map_user(heap, buffer, vma);
}
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
- if (carveout_heap->untouchable)
+ if (carveout_heap->untouchable) {
+ pr_err("%s: mapping %s heap unallowed\n", __func__, heap->name);
return ERR_PTR(-EACCES);
+ }
return ion_heap_map_kernel(heap, buffer);
}
goto err;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret)
+ if (ret) {
+ pr_err("%s: failed to alloc sgtable(err %d)\n", __func__, -ret);
goto free_mem;
+ }
sg_set_page(table->sgl, pages, size, 0);
vaddr = vmap(pages, npages, VM_MAP, pgprot);
vfree(pages);
- if (!vaddr)
+ if (!vaddr) {
+ pr_err("%s: failed vmap %d pages\n", __func__, npages);
return ERR_PTR(-ENOMEM);
+ }
return vaddr;
}
gfpmask &= ~__GFP_ZERO;
page = alloc_pages(gfpmask, pool->order);
- if (!page)
+ if (!page) {
+ if (pool->order == 0)
+ pr_err("%s: failed to alloc order-0 page (gfp %pGg)\n",
+ __func__, &gfpmask);
return NULL;
+ }
return page;
}
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
- if (size / PAGE_SIZE > totalram_pages / 2)
+ if (size / PAGE_SIZE > totalram_pages / 2) {
+ pr_err("%s: too large allocation, %zu bytes\n", __func__, size);
return -ENOMEM;
+ }
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
if (!table)
goto free_pages;
- if (sg_alloc_table(table, i, GFP_KERNEL))
+ if (sg_alloc_table(table, i, GFP_KERNEL)) {
+ pr_err("%s: failed to alloc sgtable of %d nent\n", __func__, i);
goto free_table;
+ }
sg = table->sgl;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {