int r;
unsigned long total_size = 0;
- array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+ array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
if (!array)
return -ENOMEM;
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < list->num_entries; ++i)
amdgpu_bo_unref(&list->array[i].robj);
- drm_free_large(list->array);
+ kvfree(list->array);
list->gds_obj = gds_obj;
list->gws_obj = gws_obj;
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- drm_free_large(array);
+ kvfree(array);
return r;
}
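
The hunk above is the template for every conversion in this patch: drm_malloc_ab()
multiplied its two arguments with overflow checking and chose kmalloc() or vmalloc()
based on the resulting size, which is exactly the contract of kvmalloc_array(). A
minimal sketch of the allocate/use/free pattern (illustrative only, not from the
patch):

    #include <linux/mm.h>   /* kvmalloc_array(), kvfree() */

    static int example_alloc_table(size_t n, size_t size)
    {
            void *table;

            /* Overflow-checked n * size; kmalloc first, vmalloc fallback
             * for large or fragmented requests. */
            table = kvmalloc_array(n, size, GFP_KERNEL);
            if (!table)
                    return -ENOMEM;         /* NULL on overflow or OOM */

            /* ... use table ... */

            kvfree(table);                  /* frees either allocation flavour */
            return 0;
    }
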
amdgpu_bo_unref(&list->array[i].robj);
mutex_destroy(&list->lock);
- drm_free_large(list->array);
+ kvfree(list->array);
kfree(list);
}
int r;
- info = drm_malloc_ab(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry));
+ info = kvmalloc_array(args->in.bo_number,
+ sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
if (!info)
return -ENOMEM;
memset(args, 0, sizeof(*args));
args->out.list_handle = handle;
- drm_free_large(info);
+ kvfree(info);
return 0;
error_free:
- drm_free_large(info);
+ kvfree(info);
return r;
}
size = p->chunks[i].length_dw;
cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
i = p->nchunks - 1;
free_partial_kdata:
for (; i >= 0; i--)
- drm_free_large(p->chunks[i].kdata);
+ kvfree(p->chunks[i].kdata);
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
return r;
if (binding_userptr) {
- drm_free_large(lobj->user_pages);
+ kvfree(lobj->user_pages);
lobj->user_pages = NULL;
}
}
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
}
list_for_each_entry(e, &need_pages, tv.head) {
struct ttm_tt *ttm = e->robj->tbo.ttm;
- e->user_pages = drm_calloc_large(ttm->num_pages,
- sizeof(struct page*));
+ e->user_pages = kvmalloc_array(ttm->num_pages,
+ sizeof(struct page*),
+ GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
r = -ENOMEM;
DRM_ERROR("calloc failure in %s\n", __func__);
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
}
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
}
}
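
drm_calloc_large() zeroed its buffer, kvmalloc_array() does not, so every
drm_calloc_large() call site in this patch asks for the zeroing explicitly with
__GFP_ZERO (a kvcalloc() wrapper only arrived in later kernels). That is also why
the memset() in the very first hunk stays: drm_malloc_ab() never zeroed. Sketch of
the equivalence:

    /* old, implicitly zeroed: */
    pages = drm_calloc_large(n, sizeof(*pages));
    /* new equivalent; without __GFP_ZERO the memory is uninitialized: */
    pages = kvmalloc_array(n, sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
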
amdgpu_bo_list_put(parser->bo_list);
for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kvfree(parser->chunks[i].kdata);
kfree(parser->chunks);
if (parser->job)
amdgpu_job_free(parser->job);
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
- parent->entries = drm_calloc_large(num_entries,
- sizeof(struct amdgpu_vm_pt));
+ parent->entries = kvmalloc_array(num_entries,
+ sizeof(struct amdgpu_vm_pt),
+ GFP_KERNEL | __GFP_ZERO);
if (!parent->entries)
return -ENOMEM;
- memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
for (i = 0; i <= level->last_entry_used; i++)
amdgpu_vm_free_levels(&level->entries[i]);
- drm_free_large(level->entries);
+ kvfree(level->entries);
}
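
With __GFP_ZERO the entries array already comes back zeroed, so the memset() dropped
above was redundant; note it was also short by a factor of num_entries, clearing
only the first entry:

    /* what the dropped line cleared vs. what a full clear would need: */
    memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));                /* one entry   */
    memset(parent->entries, 0, num_entries * sizeof(struct amdgpu_vm_pt)); /* all entries */
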
/**
npages = obj->size >> PAGE_SHIFT;
- pages = drm_malloc_ab(npages, sizeof(struct page *));
+ pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
return ERR_PTR(-ENOMEM);
while (i--)
put_page(pages[i]);
- drm_free_large(pages);
+ kvfree(pages);
return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
put_page(pages[i]);
}
- drm_free_large(pages);
+ kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
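
A single kvfree() replaces drm_free_large() no matter which branch the allocator
took, because it dispatches on the address itself; its mm/util.c implementation is
essentially:

    void kvfree(const void *addr)
    {
            if (is_vmalloc_addr(addr))
                    vfree(addr);
            else
                    kfree(addr);
    }
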
uintptr_t ptr;
unsigned int flags = 0;
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec)
return ERR_PTR(-ENOMEM);
if (ret < 0) {
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
return ERR_PTR(ret);
}
mm = get_task_mm(etnaviv_obj->userptr.task);
pinned = 0;
if (mm == current->mm) {
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec) {
mmput(mm);
return -ENOMEM;
pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
!etnaviv_obj->userptr.ro, pvec);
if (pinned < 0) {
- drm_free_large(pvec);
+ kvfree(pvec);
mmput(mm);
return pinned;
}
}
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
release_pages(etnaviv_obj->pages, npages, 0);
- drm_free_large(etnaviv_obj->pages);
+ kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
}
* ours, just free the array we allocated:
*/
if (etnaviv_obj->pages)
- drm_free_large(etnaviv_obj->pages);
+ kvfree(etnaviv_obj->pages);
drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}
npages = size / PAGE_SIZE;
etnaviv_obj->sgt = sgt;
- etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!etnaviv_obj->pages) {
ret = -ENOMEM;
goto fail;
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
*/
- bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
- relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
- stream = drm_malloc_ab(1, args->stream_size);
+ bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
+ relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+ stream = kvmalloc(args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
args->nr_bos);
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
if (stream)
- drm_free_large(stream);
+ kvfree(stream);
if (bos)
- drm_free_large(bos);
+ kvfree(bos);
if (relocs)
- drm_free_large(relocs);
+ kvfree(relocs);
return ret;
}
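
The comment above ("copy ... in one go, and do this outside of any locks") is the
usual staging pattern: duplicate the user arrays into kernel memory first, fail
early, and only then take locks. A hedged sketch of that pattern (the helper name
is hypothetical; later kernels added vmemdup_user() for exactly this):

    /* kvmalloc_array() already rejected n * size overflow, so the
     * multiplication below cannot wrap. */
    static void *example_memdup_user_array(const void __user *uptr,
                                           size_t n, size_t size)
    {
            void *buf = kvmalloc_array(n, size, GFP_KERNEL);

            if (!buf)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(buf, uptr, n * size)) {
                    kvfree(buf);
                    return ERR_PTR(-EFAULT);
            }
            return buf;
    }
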
nr_pages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+ exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_ZERO);
if (!exynos_gem->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
return ret;
}
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
npages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!exynos_gem->pages) {
ret = -ENOMEM;
goto err;
return &exynos_gem->base;
err_free_large:
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
err:
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
int ret;
total = READ_ONCE(dev_priv->mm.object_count);
- objects = drm_malloc_ab(total, sizeof(*objects));
+ objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
if (!objects)
return -ENOMEM;
mutex_unlock(&dev->struct_mutex);
out:
- drm_free_large(objects);
+ kvfree(objects);
return ret;
}
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
- pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
if (!pages)
return NULL;
}
addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
- drm_free_large(pages);
+ kvfree(pages);
return addr;
}
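
The vmap path above keeps a small on-stack array for the common case and allocates
only for large objects; GFP_TEMPORARY (GFP_KERNEL plus a reclaimable hint at this
point in history) satisfies kvmalloc()'s requirement for GFP_KERNEL-compatible
flags. Condensed, illustrative form of the stack-or-heap pattern:

    struct page *stack_pages[32];
    struct page **pages = stack_pages;

    if (n_pages > ARRAY_SIZE(stack_pages)) {
            /* too big for the stack -- take a temporary heap copy */
            pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
            if (!pages)
                    return NULL;
    }
    /* ... fill pages[] and vmap() them ... */
    if (pages != stack_pages)
            kvfree(pages);          /* never kvfree() the stack array */
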
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
- reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
- reloc = drm_malloc_ab(total, sizeof(*reloc));
+ reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
+ reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
if (reloc == NULL || reloc_offset == NULL) {
- drm_free_large(reloc);
- drm_free_large(reloc_offset);
+ kvfree(reloc);
+ kvfree(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
*/
err:
- drm_free_large(reloc);
- drm_free_large(reloc_offset);
+ kvfree(reloc);
+ kvfree(reloc_offset);
return ret;
}
}
/* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), GFP_KERNEL);
+ exec2_list = kvmalloc_array(args->buffer_count, sizeof(*exec2_list), GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return -EFAULT;
}
}
}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return ret;
}
return -EINVAL;
}
- exec2_list = drm_malloc_gfp(args->buffer_count,
+ exec2_list = kvmalloc_array(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);
if (exec2_list == NULL) {
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
- drm_free_large(exec2_list);
+ kvfree(exec2_list);
return -EFAULT;
}
}
}
- drm_free_large(exec2_list);
+ kvfree(exec2_list);
return ret;
}
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_gfp(n_pages,
+ page_addr_list = kvmalloc_array(n_pages,
sizeof(dma_addr_t),
GFP_TEMPORARY);
if (!page_addr_list)
DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
- drm_free_large(page_addr_list);
+ kvfree(page_addr_list);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
- drm_free_large(page_addr_list);
+ kvfree(page_addr_list);
DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
ret = -ENOMEM;
pinned = 0;
- pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
i915_gem_object_put(obj);
put_task_struct(work->task);
pinned = 0;
if (mm == current->mm) {
- pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+ pvec = kvmalloc_array(num_pages, sizeof(struct page *),
GFP_TEMPORARY |
__GFP_NORETRY |
__GFP_NOWARN);
if (IS_ERR(pages))
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
return pages;
}
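
The userptr fast path allocates opportunistically: __GFP_NORETRY and __GFP_NOWARN
keep the attempt cheap and quiet, and a NULL result simply routes the request to
the worker, which retries with plain GFP_TEMPORARY. Sketch (the fallback name is
hypothetical):

    pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                          GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
    if (!pvec)
            return example_queue_userptr_worker(obj); /* hypothetical slow path */
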
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
out_bitmap:
kfree(bitmap);
out_waiters:
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
out_bitmap:
kfree(bitmap);
out_waiters:
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
put_task_struct(waiters[n].tsk);
}
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
struct page **p;
int ret, i;
- p = drm_malloc_ab(npages, sizeof(struct page *));
+ p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
if (ret) {
- drm_free_large(p);
+ kvfree(p);
return ERR_PTR(ret);
}
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else {
drm_mm_remove_node(msm_obj->vram_node);
- drm_free_large(msm_obj->pages);
+ kvfree(msm_obj->pages);
}
msm_obj->pages = NULL;
* ours, just free the array we allocated:
*/
if (msm_obj->pages)
- drm_free_large(msm_obj->pages);
+ kvfree(msm_obj->pages);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_obj = to_msm_bo(obj);
msm_obj->sgt = sgt;
- msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
ret = -ENOMEM;
goto fail;
p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+ p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+ GFP_KERNEL | __GFP_ZERO);
if (p->relocs == NULL) {
return -ENOMEM;
}
continue;
}
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
size *= sizeof(uint32_t);
if (p->chunks[i].kdata == NULL) {
return -ENOMEM;
}
}
kfree(parser->track);
- drm_free_large(parser->relocs);
- drm_free_large(parser->vm_bos);
+ kvfree(parser->relocs);
+ kvfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kvfree(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
ttm_eu_backoff_reservation(&ticket, &list);
error_free:
- drm_free_large(vm_bos);
+ kvfree(vm_bos);
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
/* and then save the content of the ring */
- *data = drm_malloc_ab(size, sizeof(uint32_t));
+ *data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
radeon_ring_unlock_commit(rdev, ring, false);
- drm_free_large(data);
+ kvfree(data);
return 0;
}
struct radeon_bo_list *list;
unsigned i, idx;
- list = drm_malloc_ab(vm->max_pde_used + 2,
- sizeof(struct radeon_bo_list));
+ list = kvmalloc_array(vm->max_pde_used + 2,
+ sizeof(struct radeon_bo_list), GFP_KERNEL);
if (!list)
return NULL;
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
-#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+ ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
+ GFP_KERNEL | __GFP_ZERO);
}
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+ ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address));
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}
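
The DMA variant sizes one buffer for both arrays and points dma_address just past
the page pointers, which is why ttm_dma_tt_fini() below frees only ttm->pages:

    /*
     * Layout of the single allocation for num_pages == N:
     *
     *   ttm->ttm.pages                   ttm->dma_address
     *   v                                v
     *   [ struct page * x N            ][ dma_addr_t x N ]
     *
     * One kvfree(ttm->pages) releases both arrays.
     */
    ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
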
void ttm_tt_fini(struct ttm_tt *ttm)
{
- drm_free_large(ttm->pages);
+ kvfree(ttm->pages);
ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- drm_free_large(ttm->pages);
+ kvfree(ttm->pages);
ttm->pages = NULL;
ttm_dma->dma_address = NULL;
}
return -ENOMEM;
obj->sg = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (obj->pages == NULL) {
DRM_ERROR("obj pages is NULL %d\n", npages);
return -ENOMEM;
void udl_gem_put_pages(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
- drm_free_large(obj->pages);
+ kvfree(obj->pages);
obj->pages = NULL;
return;
}
return -EINVAL;
}
- exec->bo = drm_calloc_large(exec->bo_count,
- sizeof(struct drm_gem_cma_object *));
+ exec->bo = kvmalloc_array(exec->bo_count,
+ sizeof(struct drm_gem_cma_object *),
+ GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
}
- handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+ handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
spin_unlock(&file_priv->table_lock);
fail:
- drm_free_large(handles);
+ kvfree(handles);
return ret;
}
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
- temp = drm_malloc_ab(temp_size, 1);
+ temp = kvmalloc(temp_size, GFP_KERNEL);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
fail:
- drm_free_large(temp);
+ kvfree(temp);
return ret;
}
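
The temp buffer exists because, as the comment above says, the BO's kernel mapping
is assumed uncached: the CLs are staged and validated in cached memory and written
to the BO once. Illustrative shape of that bounce-buffer flow (user_ptr and
bo_vaddr are hypothetical names):

    temp = kvmalloc(temp_size, GFP_KERNEL);         /* cached staging buffer */
    if (!temp)
            return -ENOMEM;
    if (copy_from_user(temp, user_ptr, temp_size))
            ret = -EFAULT;
    else
            memcpy(bo_vaddr, temp, temp_size);      /* one write to WC memory */
    kvfree(temp);
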
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
- drm_free_large(exec->bo);
+ kvfree(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
- drm_free_large(vgem_obj->pages);
+ kvfree(vgem_obj->pages);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
obj->table = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!obj->pages) {
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
- bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
- sizeof(uint32_t));
- buflist = drm_calloc_large(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer));
+ bo_handles = kvmalloc_array(exbuf->num_bo_handles,
+ sizeof(uint32_t), GFP_KERNEL);
+ buflist = kvmalloc_array(exbuf->num_bo_handles,
+ sizeof(struct ttm_validate_buffer),
+ GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) {
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return -ENOMEM;
}
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return ret;
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return -ENOENT;
}
list_add(&buflist[i].head, &validate_list);
}
- drm_free_large(bo_handles);
+ kvfree(bo_handles);
}
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
- drm_free_large(buflist);
+ kvfree(buflist);
dma_fence_put(&fence->f);
return 0;
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
virtio_gpu_unref_list(&validate_list);
- drm_free_large(buflist);
+ kvfree(buflist);
return ret;
}
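
One detail the paired error paths above rely on: kvfree(NULL) is a no-op (it ends
in kfree(NULL) or vfree(NULL)), so when either of the two allocations fails, both
pointers can be freed unconditionally:

    if (!bo_handles || !buflist) {
            kvfree(bo_handles);     /* safe even when NULL */
            kvfree(buflist);
            return -ENOMEM;
    }
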
#include <drm/drm_fourcc.h>
#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
-#include <drm/drm_mem_util.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
#include <drm/drm_sarea.h>