}
if (ret) {
- pr_err("%s: reserved fields of query_data should be 0\n",
- __func__);
+ perrfn("reserved fields of query_data should be 0");
return -EINVAL;
}
dir = ion_ioctl_dir(cmd);
if (_IOC_SIZE(cmd) > sizeof(data)) {
- pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
+ perrfn("unknown ioctl %#x", cmd);
return -EINVAL;
}
break;
default:
if (cmd != ION_IOC_FREE)
- pr_err("%s: unknown ioctl %#x\n", __func__, cmd);
+ perrfn("unknown ioctl %#x", cmd);
return -ENOTTY;
}
} else if (buffer > entry) {
p = &(*p)->rb_right;
} else {
- pr_err("%s: buffer already found.", __func__);
+ perrfn("buffer already found.");
BUG();
}
}
heap->ops->free(buffer);
err2:
kfree(buffer);
- pr_err("%s: failed to alloc (len %zu, flag %#lx) buffer from %s heap\n",
- __func__, len, flags, heap->name);
+ perrfn("failed to alloc (len %zu, flag %#lx) buffer from %s heap",
+ len, flags, heap->name);
return ERR_PTR(ret);
}
"heap->ops->map_kernel should return ERR_PTR on error"))
return ERR_PTR(-EINVAL);
if (IS_ERR(vaddr)) {
- pr_err("%s: failed to alloc kernel address of %zu buffer\n",
- __func__, buffer->size);
+ perrfn("failed to alloc kernel address of %zu buffer",
+ buffer->size);
return vaddr;
}
buffer->vaddr = vaddr;
int ret = 0;
if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
+ perrfn("this heap does not define a method for mapping to userspace");
return -EINVAL;
}
if ((buffer->flags & ION_FLAG_NOZEROED) != 0) {
- pr_err("%s: mmap() to nozeroed buffer is not allowed\n",
- __func__);
+ perrfn("mmap() to nozeroed buffer is not allowed");
return -EACCES;
}
if ((buffer->flags & ION_FLAG_PROTECTED) != 0) {
- pr_err("%s: mmap() to protected buffer is not allowed\n",
- __func__);
+ perrfn("mmap() to protected buffer is not allowed");
return -EACCES;
}
mutex_unlock(&buffer->lock);
if (ret)
- pr_err("%s: failure mapping buffer to userspace\n",
- __func__);
+ perrfn("failure mapping buffer to userspace");
return ret;
}
len = PAGE_ALIGN(len);
if (!len) {
- pr_err("%s: zero size allocation - heapmask %#x, flags %#x\n",
- __func__, heap_id_mask, flags);
+ perrfn("zero size allocation - heapmask %#x, flags %#x",
+ heap_id_mask, flags);
return ERR_PTR(-EINVAL);
}
up_read(&dev->lock);
if (!buffer) {
- pr_err("%s: no matching heap found against heapmaks %#x\n",
- __func__, heap_id_mask);
+	perrfn("no matching heap found against heapmask %#x", heap_id_mask);
return ERR_PTR(-ENODEV);
}
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
- pr_err("%s: failed to export dmabuf (err %ld)\n", __func__,
- -PTR_ERR(dmabuf));
+ perrfn("failed to export dmabuf (err %ld)", -PTR_ERR(dmabuf));
_ion_buffer_destroy(buffer);
}
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0) {
- pr_err("%s: failed to get dmabuf fd (err %d)\n", __func__, -fd);
+ perrfn("failed to get dmabuf fd (err %d)", -fd);
dma_buf_put(dmabuf);
}
}
if (query->cnt <= 0) {
- pr_err("%s: invalid heapdata count %u\n", __func__, query->cnt);
+ perrfn("invalid heapdata count %u", query->cnt);
goto out;
}
struct ion_device *dev = internal_dev;
if (!heap->ops->allocate || !heap->ops->free)
- pr_err("%s: can not add heap with invalid ops struct.\n",
- __func__);
+ perrfn("can not add heap with invalid ops struct.");
spin_lock_init(&heap->free_lock);
heap->free_list_size = 0;
char buf[256], *path;
path = dentry_path(dev->debug_root, buf, 256);
- pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
- path, debug_name);
+ perr("Failed to create heap shrinker debugfs at %s/%s",
+ path, debug_name);
}
}
idev->dev.parent = NULL;
ret = misc_register(&idev->dev);
if (ret) {
- pr_err("ion: failed to register misc device.\n");
+ perr("ion: failed to register misc device.");
kfree(idev);
return ret;
}
idev->debug_root = debugfs_create_dir("ion", NULL);
if (!idev->debug_root) {
- pr_err("ion: failed to create debugfs root directory.\n");
+ perr("ion: failed to create debugfs root directory.");
goto debugfs_done;
}
void *ion_buffer_kmap_get(struct ion_buffer *buffer);
void ion_buffer_kmap_put(struct ion_buffer *buffer);
+/*
+ * ION logging helpers: each wrapper prepends IONPREFIX and appends the
+ * trailing newline, so call sites pass a plain format string with no
+ * "\n" and no "%s: ", __func__ boilerplate.
+ *  - perr/perrfn log via pr_err(); perrdev/perrfndev log via dev_err().
+ *  - The *fn variants additionally prefix the calling function's name
+ *    (__func__ expands at the call site because these are macros).
+ * Uses the GNU named-variadic-macro extension (arg... / ##arg), which is
+ * standard practice in kernel code.
+ */
+#define IONPREFIX "[Exynos][ION] "
+#define perr(format, arg...) \
+	pr_err(IONPREFIX format "\n", ##arg)
+
+#define perrfn(format, arg...) \
+	pr_err(IONPREFIX "%s: " format "\n", __func__, ##arg)
+
+#define perrdev(dev, format, arg...) \
+	dev_err(dev, IONPREFIX format "\n", ##arg)
+
+#define perrfndev(dev, format, arg...) \
+	dev_err(dev, IONPREFIX "%s: " format "\n", __func__, ##arg)
+
#endif /* _ION_H */
#include <asm/cacheflush.h>
+#include "ion.h"
#include "ion_exynos.h"
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
unsigned long out_addr;
if (!secure_iova_pool) {
- pr_err("%s: Secure IOVA pool is not created\n", __func__);
+ perrfn("Secure IOVA pool is not created");
return -ENODEV;
}
spin_unlock(&siova_pool_lock);
if (out_addr == 0) {
- pr_err("%s: failed alloc secure iova. %zu/%zu bytes used\n",
- __func__, gen_pool_avail(secure_iova_pool),
+ perrfn("failed alloc secure iova. %zu/%zu bytes used",
+ gen_pool_avail(secure_iova_pool),
gen_pool_size(secure_iova_pool));
return -ENOMEM;
}
void ion_secure_iova_free(unsigned long addr, unsigned long size)
{
if (!secure_iova_pool) {
- pr_err("%s: Secure IOVA pool is not created\n", __func__);
+ perrfn("Secure IOVA pool is not created");
return;
}
{
secure_iova_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!secure_iova_pool) {
- pr_err("%s: failed to create Secure IOVA pool\n", __func__);
+ perrfn("failed to create Secure IOVA pool");
return -ENOMEM;
}
if (gen_pool_add(secure_iova_pool, ION_SECURE_DMA_BASE,
ION_SECURE_DMA_END - ION_SECURE_DMA_BASE, -1)) {
- pr_err("%s: failed to set address range of Secure IOVA pool\n",
- __func__);
+ perrfn("failed to set address range of Secure IOVA pool");
return -ENOMEM;
}
err_smc:
ion_secure_iova_free(dma_addr, size);
err_iova:
- pr_err("%s: PROT:%#x (err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)\n",
- __func__, SMC_DRM_PPMP_PROT, drmret, dma_addr, size,
+ perrfn("PROT:%#x (err=%d,va=%#lx,len=%#lx,cnt=%u,flg=%u)",
+ SMC_DRM_PPMP_PROT, drmret, dma_addr, size,
protdesc->chunk_count, protdesc->flags);
return ret;
ion_secure_iova_free(protdesc->dma_addr, size);
if (ret != DRMDRV_OK) {
- pr_err("%s: UNPROT:%d(err=%d,va=%#x,len=%#lx,cnt=%u,flg=%u)\n",
- __func__, SMC_DRM_PPMP_UNPROT, ret, protdesc->dma_addr,
+ perrfn("UNPROT:%d(err=%d,va=%#x,len=%#lx,cnt=%u,flg=%u)",
+ SMC_DRM_PPMP_UNPROT, ret, protdesc->dma_addr,
size, protdesc->chunk_count, protdesc->flags);
return -EACCES;
}
ret = ion_secure_protect(protdesc, protalign);
if (ret) {
- pr_err("%s: protection failure (id%u,len%u,base%#lx,align%#x\n",
- __func__, protection_id, size, phys, protalign);
+	perrfn("protection failure (id%u,len%u,base%#lx,align%#x)",
+ protection_id, size, phys, protalign);
kfree(protdesc);
return ERR_PTR(ret);
}
ret = ion_secure_protect(protdesc, protalign);
if (ret) {
- pr_err("%s: protection failure (id%u,chk%u,count%u,align%#x\n",
- __func__, protection_id, chunk_size, count, protalign);
+	perrfn("protection failure (id%u,chk%u,count%u,align%#x)",
+ protection_id, chunk_size, count, protalign);
kfree(protdesc);
return ERR_PTR(ret);
}
int ret;
if (carveout_heap->untouchable && !(flags & ION_FLAG_PROTECTED)) {
- pr_err("%s: ION_FLAG_PROTECTED needed by untouchable heap %s\n",
- __func__, heap->name);
+ perrfn("ION_FLAG_PROTECTED needed by untouchable heap %s",
+ heap->name);
return -EACCES;
}
return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret) {
- pr_err("%s: failed to allocate scatterlist (err %d)\n",
- __func__, ret);
+ perrfn("failed to allocate scatterlist (err %d)", ret);
goto err_free;
}
paddr = ion_carveout_allocate(carveout_heap, aligned_size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
- pr_err("%s: failed to allocate from %s(id %d), size %lu\n",
- __func__, heap->name, heap->id, size);
+ perrfn("failed to allocate from %s(id %d), size %lu",
+ heap->name, heap->id, size);
ret = -ENOMEM;
goto err_free_table;
}
container_of(heap, struct ion_carveout_heap, heap);
if (carveout_heap->untouchable) {
- pr_err("%s: mmap of %s heap unallowed\n", __func__, heap->name);
+ perrfn("mmap of %s heap unallowed", heap->name);
return -EACCES;
}
container_of(heap, struct ion_carveout_heap, heap);
if (carveout_heap->untouchable) {
- pr_err("%s: mapping %s heap unallowed\n", __func__, heap->name);
+ perrfn("mapping %s heap unallowed", heap->name);
return ERR_PTR(-EACCES);
}
pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
if (!pages) {
- pr_err("%s: failed to allocate from %s(id %d), size %lu\n",
- __func__, cma_heap->heap.name, cma_heap->heap.id, len);
+ perrfn("failed to allocate from %s(id %d), size %lu",
+ cma_heap->heap.name, cma_heap->heap.id, len);
goto err;
}
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret) {
- pr_err("%s: failed to alloc sgtable(err %d)\n", __func__, -ret);
+ perrfn("failed to alloc sgtable(err %d)", -ret);
goto free_mem;
}
buffer_file = debugfs_create_file("buffers", 0444, idev->debug_root,
idev, &debug_buffers_fops);
if (!buffer_file)
- pr_err("%s: failed to create debugfs/ion/buffers\n", __func__);
+ perrfn("failed to create debugfs/ion/buffers");
ion_oom_notifier.idev = idev;
register_oom_notifier(&ion_oom_notifier.nb);
struct ion_heap *heap = ion_get_heap_by_name(heap_name);
if (!heap) {
- pr_err("%s: heap '%s' is not found\n", __func__, heap_name);
+ perrfn("heap '%s' is not found", heap_name);
return ERR_PTR(-EINVAL);
}
int ret = (int)iovm_map->iova;
kfree(iovm_map);
- dev_err(dev, "%s: failed to allocate iova (err %d)\n",
- __func__, ret);
+ perrfndev(dev, "failed to allocate iova (err %d)", ret);
return ERR_PTR(ret);
}
domain = get_domain_from_dev(attachment->dev);
if (!domain) {
- dev_err(attachment->dev, "%s: no iommu domain\n", __func__);
+ perrfndev(attachment->dev, "no iommu domain");
return -EINVAL;
}
domain = get_domain_from_dev(attachment->dev);
if (!domain) {
- dev_err(attachment->dev, "%s: no iommu domain\n", __func__);
+ perrfndev(attachment->dev, "no iommu domain");
return;
}
table->orig_nents, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (nents < table->orig_nents) {
- pr_err("%s: failed dma_map_sg(nents %d)=nents %d\n",
- __func__, table->orig_nents, nents);
+ perrfn("failed dma_map_sg(nents %d)=nents %d",
+ table->orig_nents, nents);
if (id < MAX_BUFFER_IDS)
ida_simple_remove(&ion_buffer_ida, id);
return -ENOMEM;
prop = of_get_flat_dt_prop(rmem->fdt_node, "ion,heapname", &len);
if (!prop) {
- pr_err("%s: 'ion,heapname' is missing in '%s' node\n",
- __func__, rmem->name);
+ perrfn("'ion,heapname' is missing in '%s' node", rmem->name);
return -EINVAL;
}
heapname = (char *)prop;
if (reserved_mem_count == ARRAY_SIZE(ion_reserved_mem)) {
- pr_err("%s: Not enough reserved_mem slot for %s\n",
- __func__, rmem->name);
+ perrfn("Not enough reserved_mem slot for %s", rmem->name);
return -ENOMEM;
}
if (untch && reusable) {
- pr_err("%s: 'reusable', 'untouchable' should not be together\n",
- __func__);
+ perrfn("'reusable', 'untouchable' should not be together");
return -EINVAL;
}
ret = cma_init_reserved_mem(rmem->base, rmem->size, 0,
heapname, &cma);
if (ret < 0) {
- pr_err("%s: failed to init cma for '%s'\n",
- __func__, heapname);
+ perrfn("failed to init cma for '%s'", heapname);
return ret;
}
u32 align;
if (of_property_read_string(np, "ion,heapname", &pheap.name)) {
- pr_err("%s: failed to read ion,heapname in '%s'\n",
- __func__, np->name);
+ perrfn("failed to read ion,heapname in '%s'", np->name);
return false;
}
if (pheap.secure) {
if (of_property_read_u32(np, "ion,protection_id", &pheap.id)) {
- pr_err("%s: failed to read ion,protection_id in '%s'\n",
- __func__, np->name);
+ perrfn("failed to read ion,protection_id in '%s'", np->name);
return false;
}
-	if (pheap.id > 32) {
-		pr_err("%s: too large protection id %d of '%s'\n",
-		       __func__, pheap.id, pheap.name);
+	/* id == 32 would make the later (1 << pheap.id) undefined; reject it too */
+	if (pheap.id >= 32) {
+		perrfn("too large protection id %d of '%s'",
+		       pheap.id, pheap.name);
return false;
}
if ((1 << pheap.id) & prot_id_map) {
- pr_err("%s: protection_id %d in '%s' already exists\n",
- __func__, pheap.id, np->name);
+ perrfn("protection_id %d in '%s' already exists",
+ pheap.id, np->name);
return false;
}
}
heap = ion_hpa_heap_create(&pheap, hpa_alloc_exceptions,
hpa_num_exception_areas);
if (IS_ERR(heap)) {
- pr_err("%s: failed to register '%s' heap\n",
- __func__, pheap.name);
+ perrfn("failed to register '%s' heap",
+ pheap.name);
return false;
}
pheap.untouchable = ion_reserved_mem[i].untouchable;
if (pheap.id > 32) {
- pr_err("%s: too large protection id %d of '%s'\n",
- __func__, pheap.id, pheap.name);
+ perrfn("too large protection id %d of '%s'",
+ pheap.id, pheap.name);
continue;
}
if (pheap.secure && ((1 << pheap.id) & prot_id_map)) {
- pr_err("%s: protection id %d of '%s' already exists\n",
- __func__, pheap.id, pheap.name);
+ perrfn("protection id %d of '%s' already exists",
+ pheap.id, pheap.name);
continue;
}
}
if (IS_ERR(heap)) {
- pr_err("%s: failed to register '%s' heap\n",
- __func__, pheap.name);
+ perrfn("failed to register '%s' heap", pheap.name);
continue;
}
vfree(pages);
if (!vaddr) {
- pr_err("%s: failed vmap %d pages\n", __func__, npages);
+ perrfn("failed vmap %d pages", npages);
return ERR_PTR(-ENOMEM);
}
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
if (IS_ERR(heap->task)) {
- pr_err("%s: creating thread for deferred free failed\n",
- __func__);
+ perrfn("creating thread for deferred free failed");
return PTR_ERR_OR_ZERO(heap->task);
}
sched_setscheduler(heap->task, SCHED_IDLE, ¶m);
page = alloc_pages(gfpmask, pool->order);
if (!page) {
if (pool->order == 0)
- pr_err("%s: failed to alloc order-0 page (gfp %pGg)\n",
- __func__, &gfpmask);
+ perrfn("failed to alloc order-0 page (gfp %pGg)", &gfpmask);
return NULL;
}
return page;
unsigned int max_order = orders[0];
if (size / PAGE_SIZE > totalram_pages / 2) {
- pr_err("%s: too large allocation, %zu bytes\n", __func__, size);
+ perrfn("too large allocation, %zu bytes", size);
return -ENOMEM;
}
goto free_pages;
if (sg_alloc_table(table, i, GFP_KERNEL)) {
- pr_err("%s: failed to alloc sgtable of %d nent\n", __func__, i);
+ perrfn("failed to alloc sgtable of %d nent", i);
goto free_table;
}
attach = dma_buf_attach(dma_buf, dev);
if (IS_ERR(attach)) {
- dev_err(dev, "%s: failed to attach dmabuf (err %ld)\n",
- __func__, PTR_ERR(attach));
+ perrfndev(dev, "failed to attach dmabuf (err %ld)",
+ PTR_ERR(attach));
return PTR_ERR(attach);
}
table = dma_buf_map_attachment(attach, dir);
if (IS_ERR(table)) {
- dev_err(dev, "%s: failed to map dmabuf (err %ld)\n",
- __func__, PTR_ERR(table));
+ perrfndev(dev, "failed to map dmabuf (err %ld)",
+ PTR_ERR(table));
return PTR_ERR(table);
}
int i;
if (!test_data->dma_buf) {
- pr_err("%s: no dmabuf is attached\n", __func__);
+ perrfn("no dmabuf is attached");
return -EINVAL;
}
att = dma_buf_attach(test_data->dma_buf, test_data->dev);
if (IS_ERR(att)) {
- pr_err("%s: Failed to attach dmabuf\n", __func__);
+ perrfn("Failed to attach dmabuf");
return PTR_ERR(att);
}
sgt = dma_buf_map_attachment(att, DMA_TO_DEVICE);
if (IS_ERR(sgt)) {
- pr_err("%s: Failed to map to attachment\n", __func__);
+ perrfn("Failed to map to attachment");
ret = PTR_ERR(sgt);
goto err_map;
}
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
if (len != sg->length) {
- pr_err(
- "%s: expected size %zu but found %u at %d\n",
- __func__, len, sg->length, i);
+ perrfn("expected size %zu but found %u at %d",
+ len, sg->length, i);
ret = -EINVAL;
break;
}
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
if (addr >= sg_phys(sg)) {
- pr_err("%s: pages are not in address order\n",
- __func__);
+ perrfn("pages are not in address order");
ret = -EINVAL;
break;
}
case PHYS_IS_RESERVED:
{
if (sgt->orig_nents != 1) {
- pr_err("%s: buffer should be physically contiguous\n",
- __func__);
+ perrfn("buffer should be physically contiguous");
ret = -EINVAL;
break;
}
if (!PageReserved(sg_page(sgt->sgl))) {
- pr_err("%s: page of the buffer is not reserved\n",
- __func__);
+ perrfn("page of the buffer is not reserved");
ret = -EINVAL;
break;
}
case PHYS_IS_CMA:
{
if (sgt->orig_nents != 1) {
- pr_err("%s: buffer should be physically contiguous\n",
- __func__);
+ perrfn("buffer should be physically contiguous");
ret = -EINVAL;
break;
}
if (!is_migrate_cma_page(sg_page(sgt->sgl))) {
- pr_err("%s: page of the buffer is not cma page\n",
- __func__);
+ perrfn("page of the buffer is not cma page");
ret = -EINVAL;
break;
}
case PHYS_IS_ALIGNED:
{
if (sgt->orig_nents != 1) {
- pr_err("%s: buffer should be physically contiguous\n",
- __func__);
+ perrfn("buffer should be physically contiguous");
ret = -EINVAL;
break;
}
-	if ((arg & ~arg) != 0) {
-		pr_err(
-			"%s: arg %u of PHYS_IS_ALIGNED is not power of 2\n",
-			__func__, arg);
+	/* arg & ~arg is identically 0, so the old check never fired;
+	 * arg & (arg - 1) is the correct power-of-two test. */
+	if ((arg & (arg - 1)) != 0) {
+		perrfn("arg %u of PHYS_IS_ALIGNED is not power of 2", arg);
ret = -EINVAL;
break;
}
if (!IS_ALIGNED(sg_phys(sgt->sgl), arg)) {
- pr_err("%s: buffer is not aligned by %u\n",
- __func__, arg);
+ perrfn("buffer is not aligned by %u", arg);
ret = -EINVAL;
break;
}
break;
}
default:
- pr_err("%s: unknown command %u to ION_IOC_TEST_PHYS\n",
- __func__, cmd);
+ perrfn("unknown command %u to ION_IOC_TEST_PHYS", cmd);
ret = -EINVAL;
break;
}
testdev->misc.parent = &pdev->dev;
ret = misc_register(&testdev->misc);
if (ret) {
- pr_err("failed to register misc device.\n");
+ perr("failed to register misc device.");
return ret;
}