From: Rebecca Schultz Zavin
Date: Fri, 13 Dec 2013 22:23:41 +0000 (-0800)
Subject: gpu: ion: Use alloc_pages instead of vmalloc from the system heap
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=b15934b62d959ddc75f7a85fa2a721bea28756b6;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

gpu: ion: Use alloc_pages instead of vmalloc from the system heap

With this change, the ion_system_heap only consumes kernel virtual
address space when a buffer is actually mapped into the kernel (a rare
case). Pages are allocated individually and tracked in an sg_table
rather than obtained from vmalloc_user(), and the kernel mapping is
created on demand with vmap().

Signed-off-by: Rebecca Schultz Zavin
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz
Signed-off-by: Greg Kroah-Hartman
---
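For readers coming to this pattern fresh, below is a minimal,
self-contained sketch of the scheme the patch adopts, reduced to its
essentials: pages are allocated one at a time with alloc_page() and
tracked in an sg_table, and a contiguous kernel mapping is built with
vmap() only when one is actually requested. This is an illustrative
sketch, not the driver code; the demo_* names are hypothetical.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate npages discontiguous pages and record them in an sg_table. */
static struct sg_table *demo_alloc_pages(int npages)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i, j;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	if (sg_alloc_table(table, npages, GFP_KERNEL))
		goto free_table;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			goto free_pages;
		sg_set_page(sg, page, PAGE_SIZE, 0);
	}
	return table;

free_pages:
	/* i counts the entries filled before the failure */
	for_each_sg(table->sgl, sg, i, j)
		__free_page(sg_page(sg));
	sg_free_table(table);
free_table:
	kfree(table);
	return ERR_PTR(-ENOMEM);
}

/* Build a kernel mapping on demand; the common case never calls this. */
static void *demo_map_kernel(struct sg_table *table)
{
	struct page **pages;
	struct scatterlist *sg;
	void *vaddr;
	int i;

	pages = kmalloc_array(table->nents, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	for_each_sg(table->sgl, sg, table->nents, i)
		pages[i] = sg_page(sg);
	vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);	/* vmap() records the pages itself */
	return vaddr;
}

The tradeoff is that mapping into the kernel becomes O(npages) and can
fail, but the common case, a buffer that is only ever mapped to
userspace or handed to a device via the sg_table, no longer consumes
any vmalloc address space.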
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index d7e0fa0c1ef6..3383a881b720 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -26,75 +26,103 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
 				    unsigned long size, unsigned long align,
 				    unsigned long flags)
-{
-	buffer->priv_virt = vmalloc_user(size);
-	if (!buffer->priv_virt)
-		return -ENOMEM;
-	return 0;
-}
-
-void ion_system_heap_free(struct ion_buffer *buffer)
-{
-	vfree(buffer->priv_virt);
-}
-
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-					 struct ion_buffer *buffer)
 {
 	struct sg_table *table;
 	struct scatterlist *sg;
-	int i;
-	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	void *vaddr = buffer->priv_virt;
-	int ret;
+	int i, j;
+	int npages = PAGE_ALIGN(size) / PAGE_SIZE;
 
-	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
-		return ERR_PTR(-ENOMEM);
-	ret = sg_alloc_table(table, npages, GFP_KERNEL);
-	if (ret)
+		return -ENOMEM;
+	i = sg_alloc_table(table, npages, GFP_KERNEL);
+	if (i)
 		goto err0;
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		struct page *page;
-		page = vmalloc_to_page(vaddr);
-		if (!page) {
-			ret = -ENOMEM;
+		page = alloc_page(GFP_KERNEL);
+		if (!page)
 			goto err1;
-		}
 		sg_set_page(sg, page, PAGE_SIZE, 0);
-		vaddr += PAGE_SIZE;
 	}
-	return table;
+	buffer->priv_virt = table;
+	return 0;
 err1:
+	for_each_sg(table->sgl, sg, i, j)
+		__free_page(sg_page(sg));
 	sg_free_table(table);
 err0:
 	kfree(table);
-	return ERR_PTR(ret);
+	return -ENOMEM;
 }
 
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-			       struct ion_buffer *buffer)
+void ion_system_heap_free(struct ion_buffer *buffer)
 {
+	int i;
+	struct scatterlist *sg;
+	struct sg_table *table = buffer->priv_virt;
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		__free_page(sg_page(sg));
 	if (buffer->sg_table)
 		sg_free_table(buffer->sg_table);
 	kfree(buffer->sg_table);
 }
 
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	return;
+}
+
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	return buffer->priv_virt;
+	struct scatterlist *sg;
+	int i;
+	void *vaddr;
+	struct sg_table *table = buffer->priv_virt;
+	struct page **pages = kmalloc(sizeof(struct page *) * table->nents,
+				      GFP_KERNEL);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		pages[i] = sg_page(sg);
+	vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+	kfree(pages);
+
+	return vaddr;
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
 				  struct ion_buffer *buffer)
 {
+	vunmap(buffer->vaddr);
 }
 
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 			     struct vm_area_struct *vma)
 {
-	return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff);
+	struct sg_table *table = buffer->priv_virt;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff;
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		if (offset) {
+			offset--;
+			continue;
+		}
+		vm_insert_page(vma, addr, sg_page(sg));
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 
 static struct ion_heap_ops vmalloc_ops = {