From: Michal Hocko
Date: Wed, 17 May 2017 12:23:12 +0000 (+0200)
Subject: drm: drop drm_[cm]alloc* helpers
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=2098105ec65cb364f3d77baa446b2ad5ba6bc7b9;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

drm: drop drm_[cm]alloc* helpers

Now that the drm_[cm]alloc* helpers are simple one-line wrappers around
kvmalloc_array() and drm_free_large() is just an alias for kvfree(), we can
drop them and replace them with their native forms.

This shouldn't introduce any functional change.

Changes since v1
- fix typo in drivers/gpu/drm/etnaviv/etnaviv_gem.c - noticed by the 0day
  build robot

Suggested-by: Daniel Vetter
Signed-off-by: Michal Hocko
[danvet: Fixup vgem which grew another user very recently.]
Signed-off-by: Daniel Vetter
Acked-by: Christian König
Link: http://patchwork.freedesktop.org/patch/msgid/20170517122312.GK18247@dhcp22.suse.cz
---

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index a6649874e6ce..9f0247cdda5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -96,7 +96,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	int r;
 	unsigned long total_size = 0;
 
-	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
 	if (!array)
 		return -ENOMEM;
 	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
@@ -148,7 +148,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	for (i = 0; i < list->num_entries; ++i)
 		amdgpu_bo_unref(&list->array[i].robj);
 
-	drm_free_large(list->array);
+	kvfree(list->array);
 
 	list->gds_obj = gds_obj;
 	list->gws_obj = gws_obj;
@@ -163,7 +163,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 error_free:
 	while (i--)
 		amdgpu_bo_unref(&array[i].robj);
-	drm_free_large(array);
+	kvfree(array);
 	return r;
 }
 
@@ -224,7 +224,7 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
 		amdgpu_bo_unref(&list->array[i].robj);
 
 	mutex_destroy(&list->lock);
-	drm_free_large(list->array);
+	kvfree(list->array);
 	kfree(list);
 }
 
@@ -244,8 +244,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	int r;
 
-	info = drm_malloc_ab(args->in.bo_number,
-			     sizeof(struct drm_amdgpu_bo_list_entry));
+	info = kvmalloc_array(args->in.bo_number,
+			      sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
@@ -311,11 +311,11 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	memset(args, 0, sizeof(*args));
 	args->out.list_handle = handle;
-	drm_free_large(info);
+	kvfree(info);
 
 	return 0;
 
 error_free:
-	drm_free_large(info);
+	kvfree(info);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e6b9501ab0a..5b3e0f63a115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -194,7 +194,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
 
-		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 		if (p->chunks[i].kdata == NULL) {
 			ret = -ENOMEM;
 			i--;
@@ -247,7 +247,7 @@ free_all_kdata:
 	i = p->nchunks - 1;
 free_partial_kdata:
 	for (; i >= 0; i--)
-		drm_free_large(p->chunks[i].kdata);
+		kvfree(p->chunks[i].kdata);
 	kfree(p->chunks);
 	p->chunks = NULL;
 	p->nchunks = 0;
@@ -505,7 +505,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			return r;
 
 		if (binding_userptr) {
-			drm_free_large(lobj->user_pages);
+			kvfree(lobj->user_pages);
 			lobj->user_pages = NULL;
 		}
 	}
@@ -571,7 +571,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			release_pages(e->user_pages,
 				      e->robj->tbo.ttm->num_pages,
 				      false);
-			drm_free_large(e->user_pages);
+			kvfree(e->user_pages);
 			e->user_pages = NULL;
 		}
 
@@ -601,8 +601,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_for_each_entry(e, &need_pages, tv.head) {
 			struct ttm_tt *ttm = e->robj->tbo.ttm;
 
-			e->user_pages = drm_calloc_large(ttm->num_pages,
-							 sizeof(struct page*));
+			e->user_pages = kvmalloc_array(ttm->num_pages,
+						       sizeof(struct page*),
+						       GFP_KERNEL | __GFP_ZERO);
 			if (!e->user_pages) {
 				r = -ENOMEM;
 				DRM_ERROR("calloc failure in %s\n", __func__);
@@ -612,7 +613,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 			if (r) {
 				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
-				drm_free_large(e->user_pages);
+				kvfree(e->user_pages);
 				e->user_pages = NULL;
 				goto error_free_pages;
 			}
@@ -708,7 +709,7 @@ error_free_pages:
 				release_pages(e->user_pages,
 					      e->robj->tbo.ttm->num_pages,
 					      false);
-				drm_free_large(e->user_pages);
+				kvfree(e->user_pages);
 			}
 		}
 
@@ -761,7 +762,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		amdgpu_bo_list_put(parser->bo_list);
 
 	for (i = 0; i < parser->nchunks; i++)
-		drm_free_large(parser->chunks[i].kdata);
+		kvfree(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	if (parser->job)
 		amdgpu_job_free(parser->job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 07ff3b1514f1..749a6cde7985 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -279,8 +279,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	if (!parent->entries) {
 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
 
-		parent->entries = drm_calloc_large(num_entries,
-						   sizeof(struct amdgpu_vm_pt));
+		parent->entries = kvmalloc_array(num_entries,
+						 sizeof(struct amdgpu_vm_pt),
+						 GFP_KERNEL | __GFP_ZERO);
 		if (!parent->entries)
 			return -ENOMEM;
 		memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
@@ -2198,7 +2199,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 		for (i = 0; i <= level->last_entry_used; i++)
 			amdgpu_vm_free_levels(&level->entries[i]);
 
-	drm_free_large(level->entries);
+	kvfree(level->entries);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b1e28c944637..8dc11064253d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -521,7 +521,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
 	npages = obj->size >> PAGE_SHIFT;
 
-	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -546,7 +546,7 @@ fail:
 	while (i--)
 		put_page(pages[i]);
 
-	drm_free_large(pages);
+	kvfree(pages);
 	return ERR_CAST(p);
 }
 EXPORT_SYMBOL(drm_gem_get_pages);
@@ -582,7 +582,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 			put_page(pages[i]);
 	}
 
-	drm_free_large(pages);
+	kvfree(pages);
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index fd56f92f3469..d6fb724fc3cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,7 +748,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	uintptr_t ptr;
 	unsigned int flags = 0;
 
-	pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);
 
@@ -772,7 +772,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 
 	if (ret < 0) {
 		release_pages(pvec, pinned, 0);
-		drm_free_large(pvec);
+		kvfree(pvec);
 		return ERR_PTR(ret);
 	}
 
@@ -823,7 +823,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	mm = get_task_mm(etnaviv_obj->userptr.task);
 	pinned = 0;
 	if (mm == current->mm) {
-		pvec = drm_malloc_ab(npages, sizeof(struct page *));
+		pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 		if (!pvec) {
 			mmput(mm);
 			return -ENOMEM;
@@ -832,7 +832,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
 					       !etnaviv_obj->userptr.ro, pvec);
 		if (pinned < 0) {
-			drm_free_large(pvec);
+			kvfree(pvec);
 			mmput(mm);
 			return pinned;
 		}
@@ -845,7 +845,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	}
 
 	release_pages(pvec, pinned, 0);
-	drm_free_large(pvec);
+	kvfree(pvec);
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work) {
@@ -879,7 +879,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
 		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
 		release_pages(etnaviv_obj->pages, npages, 0);
-		drm_free_large(etnaviv_obj->pages);
+		kvfree(etnaviv_obj->pages);
 	}
 	put_task_struct(etnaviv_obj->userptr.task);
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 62b47972a52e..367bf952f61a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -87,7 +87,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
 	 * ours, just free the array we allocated:
 	 */
 	if (etnaviv_obj->pages)
-		drm_free_large(etnaviv_obj->pages);
+		kvfree(etnaviv_obj->pages);
 
 	drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
 }
@@ -128,7 +128,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
 	npages = size / PAGE_SIZE;
 
 	etnaviv_obj->sgt = sgt;
-	etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!etnaviv_obj->pages) {
 		ret = -ENOMEM;
 		goto fail;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index e1909429837e..a13930e1d8c9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -343,9 +343,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	 * Copy the command submission and bo array to kernel space in
 	 * one go, and do this outside of any locks.
*/ - bos = drm_malloc_ab(args->nr_bos, sizeof(*bos)); - relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs)); - stream = drm_malloc_ab(1, args->stream_size); + bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL); + relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL); + stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL); cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, ALIGN(args->stream_size, 8) + 8, args->nr_bos); @@ -487,11 +487,11 @@ err_submit_cmds: if (cmdbuf) etnaviv_cmdbuf_free(cmdbuf); if (stream) - drm_free_large(stream); + kvfree(stream); if (bos) - drm_free_large(bos); + kvfree(bos); if (relocs) - drm_free_large(relocs); + kvfree(relocs); return ret; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 55a1579d11b3..c23479be4850 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -59,7 +59,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) nr_pages = exynos_gem->size >> PAGE_SHIFT; - exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); + exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *), + GFP_KERNEL | __GFP_ZERO); if (!exynos_gem->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; @@ -101,7 +102,7 @@ err_dma_free: dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->dma_attrs); err_free: - drm_free_large(exynos_gem->pages); + kvfree(exynos_gem->pages); return ret; } @@ -122,7 +123,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) (dma_addr_t)exynos_gem->dma_addr, exynos_gem->dma_attrs); - drm_free_large(exynos_gem->pages); + kvfree(exynos_gem->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, @@ -559,7 +560,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, exynos_gem->dma_addr = sg_dma_address(sgt->sgl); npages = exynos_gem->size >> PAGE_SHIFT; - exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *)); + exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (!exynos_gem->pages) { ret = -ENOMEM; goto err; @@ -588,7 +589,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, return &exynos_gem->base; err_free_large: - drm_free_large(exynos_gem->pages); + kvfree(exynos_gem->pages); err: drm_gem_object_release(&exynos_gem->base); kfree(exynos_gem); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d689e511744e..07f87985ea0b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -229,7 +229,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) int ret; total = READ_ONCE(dev_priv->mm.object_count); - objects = drm_malloc_ab(total, sizeof(*objects)); + objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); if (!objects) return -ENOMEM; @@ -274,7 +274,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) mutex_unlock(&dev->struct_mutex); out: - drm_free_large(objects); + kvfree(objects); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b6ac3df18b58..0e07f35e270c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2504,7 +2504,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, if (n_pages > ARRAY_SIZE(stack_pages)) { /* Too big for stack -- allocate temporary array instead */ - 
-		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
 		if (!pages)
 			return NULL;
 	}
@@ -2526,7 +2526,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	addr = vmap(pages, n_pages, 0, pgprot);
 
 	if (pages != stack_pages)
-		drm_free_large(pages);
+		kvfree(pages);
 
 	return addr;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..4ee2dc38b7c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1019,11 +1019,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	for (i = 0; i < count; i++)
 		total += exec[i].relocation_count;
 
-	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
-	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
+	reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
 	if (reloc == NULL || reloc_offset == NULL) {
-		drm_free_large(reloc);
-		drm_free_large(reloc_offset);
+		kvfree(reloc);
+		kvfree(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
@@ -1099,8 +1099,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	 */
 
 err:
-	drm_free_large(reloc);
-	drm_free_large(reloc_offset);
+	kvfree(reloc);
+	kvfree(reloc_offset);
 	return ret;
 }
 
@@ -1859,13 +1859,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
+	exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
+		kvfree(exec_list);
+		kvfree(exec2_list);
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec_list,
@@ -1874,8 +1874,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
 			  args->buffer_count, ret);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
+		kvfree(exec_list);
+		kvfree(exec2_list);
 		return -EFAULT;
 	}
 
@@ -1924,8 +1924,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
-	drm_free_large(exec_list);
-	drm_free_large(exec2_list);
+	kvfree(exec_list);
+	kvfree(exec2_list);
 	return ret;
 }
 
@@ -1943,7 +1943,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	exec2_list = drm_malloc_gfp(args->buffer_count,
+	exec2_list = kvmalloc_array(args->buffer_count,
 				    sizeof(*exec2_list),
 				    GFP_TEMPORARY);
 	if (exec2_list == NULL) {
@@ -1957,7 +1957,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
 			  args->buffer_count, ret);
-		drm_free_large(exec2_list);
+		kvfree(exec2_list);
 		return -EFAULT;
 	}
 
@@ -1984,6 +1984,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		}
 	}
 
-	drm_free_large(exec2_list);
+	kvfree(exec2_list);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2aa6b97fd22f..7e3193aa7da1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3102,7 +3102,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 	int ret = -ENOMEM;
 
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_gfp(n_pages,
+	page_addr_list = kvmalloc_array(n_pages,
 					sizeof(dma_addr_t),
 					GFP_TEMPORARY);
 	if (!page_addr_list)
@@ -3135,14 +3135,14 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
 		      obj->base.size, rot_info->plane[0].width,
 		      rot_info->plane[0].height, size);
 
-	drm_free_large(page_addr_list);
+	kvfree(page_addr_list);
 
 	return st;
 
 err_sg_alloc:
 	kfree(st);
 err_st_alloc:
-	drm_free_large(page_addr_list);
+	kvfree(page_addr_list);
 
 	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
 		      obj->base.size, rot_info->plane[0].width,
 		      rot_info->plane[0].height, size);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 58ccf8b8ca1c..1a0ce1dc68f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	ret = -ENOMEM;
 	pinned = 0;
 
-	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
+	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 		unsigned int flags = 0;
@@ -555,7 +555,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	mutex_unlock(&obj->mm.lock);
 
 	release_pages(pvec, pinned, 0);
-	drm_free_large(pvec);
+	kvfree(pvec);
 
 	i915_gem_object_put(obj);
 	put_task_struct(work->task);
@@ -642,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	pinned = 0;
 	if (mm == current->mm) {
-		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
 				      GFP_TEMPORARY |
 				      __GFP_NORETRY |
 				      __GFP_NOWARN);
@@ -669,7 +669,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	if (IS_ERR(pages))
 		release_pages(pvec, pinned, 0);
-	drm_free_large(pvec);
+	kvfree(pvec);
 
 	return pages;
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 19860a372d90..7276194c04f7 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -117,7 +117,7 @@ static int igt_random_insert_remove(void *arg)
 
 	mock_engine_reset(engine);
 
-	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
 	if (!waiters)
 		goto out_engines;
 
@@ -169,7 +169,7 @@ out_order:
 out_bitmap:
 	kfree(bitmap);
 out_waiters:
-	drm_free_large(waiters);
+	kvfree(waiters);
 out_engines:
 	mock_engine_flush(engine);
 	return err;
@@ -187,7 +187,7 @@ static int igt_insert_complete(void *arg)
 
 	mock_engine_reset(engine);
 
-	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
 	if (!waiters)
 		goto out_engines;
 
@@ -254,7 +254,7 @@ static int igt_insert_complete(void *arg)
 out_bitmap:
 	kfree(bitmap);
 out_waiters:
-	drm_free_large(waiters);
+	kvfree(waiters);
 out_engines:
 	mock_engine_flush(engine);
 	return err;
@@ -368,7 +368,7 @@ static int igt_wakeup(void *arg)
 
 	mock_engine_reset(engine);
 
-	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
 	if (!waiters)
 		goto out_engines;
 
@@ -454,7 +454,7 @@ out_waiters:
 		put_task_struct(waiters[n].tsk);
 	}
-	drm_free_large(waiters);
+	kvfree(waiters);
 out_engines:
 	mock_engine_flush(engine);
 	return err;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b3b9e4..465dab942afa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -50,13 +50,13 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	struct page **p;
 	int ret, i;
 
-	p = drm_malloc_ab(npages, sizeof(struct page *));
+	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 	if (ret) {
-		drm_free_large(p);
+		kvfree(p);
 		return ERR_PTR(ret);
 	}
 
@@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj)
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 		else {
 			drm_mm_remove_node(msm_obj->vram_node);
-			drm_free_large(msm_obj->pages);
+			kvfree(msm_obj->pages);
 		}
 
 		msm_obj->pages = NULL;
@@ -707,7 +707,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		 * ours, just free the array we allocated:
 		 */
 		if (msm_obj->pages)
-			drm_free_large(msm_obj->pages);
+			kvfree(msm_obj->pages);
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
@@ -863,7 +863,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	msm_obj = to_msm_bo(obj);
 	msm_obj->sgt = sgt;
-	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
 		ret = -ENOMEM;
 		goto fail;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 3ac671f6c8e1..00b22af70f5c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -87,7 +87,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
-	p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+			GFP_KERNEL | __GFP_ZERO);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}
@@ -341,7 +342,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			continue;
 		}
 
-		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 		size *= sizeof(uint32_t);
 		if (p->chunks[i].kdata == NULL) {
 			return -ENOMEM;
@@ -440,10 +441,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 		}
 	}
 	kfree(parser->track);
-	drm_free_large(parser->relocs);
-	drm_free_large(parser->vm_bos);
+	kvfree(parser->relocs);
+	kvfree(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
-		drm_free_large(parser->chunks[i].kdata);
+		kvfree(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dddb372de2b9..574bf7e6b118 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -587,7 +587,7 @@ error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
 error_free:
-	drm_free_large(vm_bos);
+	kvfree(vm_bos);
 
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8c7872339c2a..84802b201bef 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
 	}
 
 	/* and then save the content of the ring */
-	*data = drm_malloc_ab(size, sizeof(uint32_t));
+	*data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 	if (!*data) {
 		mutex_unlock(&rdev->ring_lock);
 		return 0;
@@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
 	}
 
 	radeon_ring_unlock_commit(rdev, ring, false);
-	drm_free_large(data);
+	kvfree(data);
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index a1358748cea5..5f68245579a3 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,8 +132,8 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 	struct radeon_bo_list *list;
 	unsigned i, idx;
 
-	list = drm_malloc_ab(vm->max_pde_used + 2,
-			     sizeof(struct radeon_bo_list));
+	list = kvmalloc_array(vm->max_pde_used + 2,
+			      sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (!list)
 		return NULL;
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5260179d788a..8ebc8d3560c3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include <drm/drm_mem_util.h>
 #include
 #include
 #include
@@ -53,14 +52,16 @@
 */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
+				    GFP_KERNEL | __GFP_ZERO);
 }
 
 static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
 					  sizeof(*ttm->ttm.pages) +
-					  sizeof(*ttm->dma_address));
+					  sizeof(*ttm->dma_address),
+					  GFP_KERNEL | __GFP_ZERO);
 	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
@@ -208,7 +209,7 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-	drm_free_large(ttm->pages);
+	kvfree(ttm->pages);
 	ttm->pages = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
@@ -243,7 +244,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 
-	drm_free_large(ttm->pages);
+	kvfree(ttm->pages);
 	ttm->pages = NULL;
 	ttm_dma->dma_address = NULL;
 }
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ed0e636243b2..2e031a894813 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -228,7 +228,7 @@ static int udl_prime_create(struct drm_device *dev,
 		return -ENOMEM;
 
 	obj->sg = sg;
-	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (obj->pages == NULL) {
 		DRM_ERROR("obj pages is NULL %d\n", npages);
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 775c50e4f02c..db9ceceba30e 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -146,7 +146,7 @@ int udl_gem_get_pages(struct udl_gem_object *obj)
 void udl_gem_put_pages(struct udl_gem_object *obj)
 {
 	if (obj->base.import_attach) {
-		drm_free_large(obj->pages);
+		kvfree(obj->pages);
 		obj->pages = NULL;
 		return;
 	}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 735412e3725a..9dc7646d49ed 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -660,14 +660,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	exec->bo = drm_calloc_large(exec->bo_count,
-				    sizeof(struct drm_gem_cma_object *));
+	exec->bo = kvmalloc_array(exec->bo_count,
+				  sizeof(struct drm_gem_cma_object *),
+				  GFP_KERNEL | __GFP_ZERO);
 	if (!exec->bo) {
 		DRM_ERROR("Failed to allocate validated BO pointers\n");
 		return -ENOMEM;
 	}
 
-	handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
 	if (!handles) {
 		ret = -ENOMEM;
 		DRM_ERROR("Failed to allocate incoming GEM handles\n");
@@ -699,7 +700,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 	spin_unlock(&file_priv->table_lock);
 
 fail:
-	drm_free_large(handles);
+	kvfree(handles);
 	return ret;
 }
 
@@ -737,7 +738,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	 * read the contents back for validation, and I think the
 	 * bo->vaddr is uncached access.
 	 */
-	temp = drm_malloc_ab(temp_size, 1);
+	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
 	if (!temp) {
 		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
@@ -812,7 +813,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 		ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
 
 fail:
-	drm_free_large(temp);
+	kvfree(temp);
 	return ret;
 }
 
@@ -832,7 +833,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 	if (exec->bo) {
 		for (i = 0; i < exec->bo_count; i++)
 			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-		drm_free_large(exec->bo);
+		kvfree(exec->bo);
 	}
 
 	while (!list_empty(&exec->unref_list)) {
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4b23ba049632..54ec94c5e9ac 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -51,7 +51,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-	drm_free_large(vgem_obj->pages);
+	kvfree(vgem_obj->pages);
 
 	if (obj->import_attach)
 		drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -328,7 +328,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
 	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
 
 	obj->table = sg;
-	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!obj->pages) {
 		__vgem_gem_destroy(obj);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 06cb16d75f4b..b94bd5440e57 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -120,13 +120,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	INIT_LIST_HEAD(&validate_list);
 	if (exbuf->num_bo_handles) {
 
-		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
-					   sizeof(uint32_t));
-		buflist = drm_calloc_large(exbuf->num_bo_handles,
-					   sizeof(struct ttm_validate_buffer));
+		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
+					    sizeof(uint32_t), GFP_KERNEL);
+		buflist = kvmalloc_array(exbuf->num_bo_handles,
+					 sizeof(struct ttm_validate_buffer),
+					 GFP_KERNEL | __GFP_ZERO);
 		if (!bo_handles || !buflist) {
-			drm_free_large(bo_handles);
-			drm_free_large(buflist);
+			kvfree(bo_handles);
+			kvfree(buflist);
 			return -ENOMEM;
 		}
 
@@ -134,16 +135,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		if (copy_from_user(bo_handles, user_bo_handles,
 				   exbuf->num_bo_handles * sizeof(uint32_t))) {
 			ret = -EFAULT;
-			drm_free_large(bo_handles);
-			drm_free_large(buflist);
+			kvfree(bo_handles);
+			kvfree(buflist);
 			return ret;
 		}
 
 		for (i = 0; i < exbuf->num_bo_handles; i++) {
 			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
 			if (!gobj) {
-				drm_free_large(bo_handles);
-				drm_free_large(buflist);
+				kvfree(bo_handles);
+				kvfree(buflist);
 				return -ENOENT;
 			}
 
@@ -152,7 +153,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			list_add(&buflist[i].head, &validate_list);
 		}
 
-		drm_free_large(bo_handles);
+		kvfree(bo_handles);
 	}
 
 	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -172,7 +173,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
 	/* fence the command bo */
 	virtio_gpu_unref_list(&validate_list);
-	drm_free_large(buflist);
+	kvfree(buflist);
 	dma_fence_put(&fence->f);
 	return 0;
 
@@ -180,7 +181,7 @@ out_unresv:
 	ttm_eu_backoff_reservation(&ticket, &validate_list);
 out_free:
 	virtio_gpu_unref_list(&validate_list);
-	drm_free_large(buflist);
+	kvfree(buflist);
 	return ret;
 }
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 52085832f711..b9b5566acfe6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -70,7 +70,6 @@
 #include
 #include
 #include
-#include <drm/drm_mem_util.h>
 #include
 #include
 #include
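
For reference, the dropped helpers were thin wrappers declared in include/drm/drm_mem_util.h. The sketch below is reconstructed from the replacement pattern visible in the hunks above rather than copied from the removed header, which is why every call site converts mechanically: drm_malloc_ab(a, b) becomes kvmalloc_array(a, b, GFP_KERNEL), drm_calloc_large(a, b) becomes kvmalloc_array(a, b, GFP_KERNEL | __GFP_ZERO), drm_malloc_gfp(a, b, gfp) becomes kvmalloc_array(a, b, gfp), and drm_free_large(p) becomes kvfree(p).

/*
 * Sketch of the dropped wrappers, inferred from the conversions in this
 * patch (not verbatim from the removed header):
 */
static inline void *drm_malloc_ab(size_t nmemb, size_t size)
{
	return kvmalloc_array(nmemb, size, GFP_KERNEL);
}

static inline void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
{
	return kvmalloc_array(nmemb, size, gfp);
}

static inline void *drm_calloc_large(size_t nmemb, size_t size)
{
	return kvmalloc_array(nmemb, size, GFP_KERNEL | __GFP_ZERO);
}

static inline void drm_free_large(void *ptr)
{
	kvfree(ptr);
}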