pgnr = bo->pgnr;
- bo->page_obj = atomisp_kernel_malloc(
- sizeof(struct hmm_page_object) * pgnr);
+ bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
+ GFP_KERNEL);
if (unlikely(!bo->page_obj)) {
dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
return -ENOMEM;
}
alloc_pgnr = i;
free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
- atomisp_kernel_free(bo->page_obj);
+ kfree(bo->page_obj);
return -ENOMEM;
}
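Note: the converted calls keep the open-coded sizeof() * pgnr
multiplication from the original helper. An overflow-checked variant
exists; a minimal sketch, shown for comparison only and not part of
this patch:

	bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
				     GFP_KERNEL);
	if (unlikely(!bo->page_obj))
		return -ENOMEM;

kmalloc_array() returns NULL when the multiplication would overflow,
which the plain kmalloc() form cannot detect.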
{
free_private_bo_pages(bo, dypool, repool, bo->pgnr);
- atomisp_kernel_free(bo->page_obj);
+ kfree(bo->page_obj);
}
struct vm_area_struct *vma;
struct page **pages;
- pages = atomisp_kernel_malloc(sizeof(struct page *) * bo->pgnr);
+ pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
if (unlikely(!pages)) {
dev_err(atomisp_dev, "out of memory for pages...\n");
return -ENOMEM;
}
- bo->page_obj = atomisp_kernel_malloc(
- sizeof(struct hmm_page_object) * bo->pgnr);
+ bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
+ GFP_KERNEL);
if (unlikely(!bo->page_obj)) {
dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
- atomisp_kernel_free(pages);
+ kfree(pages);
return -ENOMEM;
}
up_read(&current->mm->mmap_sem);
if (vma == NULL) {
dev_err(atomisp_dev, "find_vma failed\n");
- atomisp_kernel_free(bo->page_obj);
- atomisp_kernel_free(pages);
+ kfree(bo->page_obj);
+ kfree(pages);
mutex_lock(&bo->mutex);
return -EFAULT;
}
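Note: the up_read() above releases a lock taken outside this excerpt;
find_vma() must run under mmap_sem, and only its result is checked
after the unlock. A minimal sketch of the assumed sequence (the
userptr name is illustrative, not taken from this hunk):

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)userptr);
	up_read(&current->mm->mmap_sem);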
bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
}
hmm_mem_stat.usr_size += bo->pgnr;
- atomisp_kernel_free(pages);
+ kfree(pages);
return 0;
out_of_mem:
for (i = 0; i < page_nr; i++)
put_page(pages[i]);
- atomisp_kernel_free(pages);
- atomisp_kernel_free(bo->page_obj);
+ kfree(pages);
+ kfree(bo->page_obj);
return -ENOMEM;
}
put_page(bo->page_obj[i].page);
hmm_mem_stat.usr_size -= bo->pgnr;
- atomisp_kernel_free(bo->page_obj);
+ kfree(bo->page_obj);
}
bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
}
- pages = atomisp_kernel_malloc(sizeof(*pages) * bo->pgnr);
+ pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
if (unlikely(!pages)) {
mutex_unlock(&bo->mutex);
dev_err(atomisp_dev, "out of memory for pages...\n");
bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
if (unlikely(!bo->vmap_addr)) {
- atomisp_kernel_free(pages);
+ kfree(pages);
mutex_unlock(&bo->mutex);
dev_err(atomisp_dev, "vmap failed...\n");
return NULL;
}
bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
- atomisp_kernel_free(pages);
+ kfree(pages);
mutex_unlock(&bo->mutex);
return bo->vmap_addr;
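Note: once vmap() succeeds, the temporary pages array is only
bookkeeping, so freeing it immediately, as above, is safe; the mapping
itself persists until a matching vunmap(). A sketch of the assumed
unmap path (vunmap() tears down the kernel mapping without freeing the
underlying pages):

	if (bo->status & (HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED)) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}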
if (pool_size == 0)
return 0;
- dypool_info = atomisp_kernel_malloc(
- sizeof(struct hmm_dynamic_pool_info));
+ dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
+ GFP_KERNEL);
if (unlikely(!dypool_info)) {
dev_err(atomisp_dev, "out of memory for repool_info.\n");
return -ENOMEM;
}
sizeof(struct hmm_page), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!dypool_info->pgptr_cache) {
- atomisp_kernel_free(dypool_info);
+ kfree(dypool_info);
return -ENOMEM;
}
kmem_cache_destroy(dypool_info->pgptr_cache);
- atomisp_kernel_free(dypool_info);
+ kfree(dypool_info);
*pool = NULL;
}
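Note: the kmem_cache_destroy() above pairs with the kmem_cache_create()
whose trailing arguments are visible earlier in this hunk. A sketch of
the assumed creation site (the "pgptr_cache" name string is a guess,
not taken from this patch):

	dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
					sizeof(struct hmm_page), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!dypool_info->pgptr_cache) {
		kfree(dypool_info);
		return -ENOMEM;
	}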
{
struct hmm_reserved_pool_info *pool_info;
- pool_info = atomisp_kernel_malloc(
- sizeof(struct hmm_reserved_pool_info));
+ pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
+ GFP_KERNEL);
if (unlikely(!pool_info)) {
dev_err(atomisp_dev, "out of memory for repool_info.\n");
return -ENOMEM;
}
- pool_info->pages = atomisp_kernel_malloc(
- sizeof(struct page *) * pool_size);
+ pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
+ GFP_KERNEL);
if (unlikely(!pool_info->pages)) {
dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
- atomisp_kernel_free(pool_info);
+ kfree(pool_info);
return -ENOMEM;
}
__free_pages(repool_info->pages[i], 0);
}
- atomisp_kernel_free(repool_info->pages);
- atomisp_kernel_free(repool_info);
+ kfree(repool_info->pages);
+ kfree(repool_info);
*pool = NULL;
}
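Note: the __free_pages(..., 0) calls above imply the reserved pool
holds order-0 pages. A sketch of the assumed fill-side counterpart
(the GFP flags and loop shape are illustrative, not shown in this
patch):

	for (i = 0; i < pool_size; i++) {
		repool_info->pages[i] = alloc_pages(GFP_KERNEL, 0);
		if (!repool_info->pages[i])
			break;
	}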