/* Allocate memory */
mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
if (NULL == mali_allocation) {
-#ifdef AML_MALI_DEBUG
MALI_PRINT_ERROR(("mali_mem_allocation_struct_create: descriptor was NULL\n"));
- show_mem(SHOW_MEM_FILTER_NODES);
-#endif
-
return NULL;
}
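/* mali_mem_backend_struct_create(): allocate the backend descriptor and reserve an idr slot for it */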
s32 index = -1;
*backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
if (NULL == *backend) {
-#ifdef AML_MALI_DEBUG
- MALI_PRINT_ERROR(("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
- show_mem(SHOW_MEM_FILTER_NODES);
-#endif
+ MALI_PRINT_ERROR(("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
return -1;
}
mem_backend = *backend;
mutex_unlock(&mali_idr_mutex);
index = ret;
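/* A negative return from idr_alloc() means no id could be reserved for this backend */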
if (ret < 0) {
- MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
+ MALI_PRINT_ERROR(("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
kfree(mem_backend);
return -ENOSPC;
}
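/* _mali_ukk_mem_allocate(): user-space entry point for creating a new GPU memory allocation */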
int retval = 0;
mali_mem_allocation *mali_allocation = NULL;
struct mali_vma_node *mali_vma_node = NULL;
-
MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
if (args->vsize < args->psize) {
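/* A hit in the VMA offset tree means the requested GPU virtual address is already in use */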
mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
if (unlikely(mali_vma_node)) {
- MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
+ MALI_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
return _MALI_OSK_ERR_FAULT;
}
/* Create the mali memory allocation struct for this request */
mali_allocation = mali_mem_allocation_struct_create(session);
if (mali_allocation == NULL) {
- MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
+ MALI_PRINT_ERROR((" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! %s, %d\n", args->gpu_vaddr, args->psize, __FILE__, __LINE__));
+ MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
return _MALI_OSK_ERR_NOMEM;
}
mali_allocation->psize = args->psize;
mali_allocation->mali_vma_node.vm_node.size = args->vsize;
mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
-
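/* Create the backend that tracks the physical memory behind this allocation */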
mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
if (mali_allocation->backend_handle < 0) {
ret = _MALI_OSK_ERR_NOMEM;
- MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+ MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n", mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+ MALI_PRINT_ERROR(("mali_allocation->backend_handle < 0! \n"));
goto failed_alloc_backend;
}
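/* Prepare the Mali MMU mapping for the new virtual address range */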
ret = mali_mem_mali_map_prepare(mali_allocation);
if (0 != ret) {
_mali_osk_mutex_signal(session->memory_lock);
+ MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n", mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+ MALI_PRINT_ERROR(("Aml-------%s, %d\n", __FILE__, __LINE__));
goto failed_prepare_map;
}
_mali_osk_mutex_signal(session->memory_lock);
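/* Secure memory is backed by a dma-buf passed in from user space as secure_shared_fd */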
#if defined(CONFIG_DMA_SHARED_BUFFER)
ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
if (_MALI_OSK_ERR_OK != ret) {
- MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n"));
+ MALI_PRINT_ERROR(("Failed to attach dma buf for secure memory! \n"));
goto failed_alloc_pages;
}
#else
ret = _MALI_OSK_ERR_UNSUPPORTED;
- MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! \n"));
+ MALI_PRINT_ERROR(("DMA not supported for mali secure memory! \n"));
goto failed_alloc_pages;
#endif
} else {
if (retval) {
ret = _MALI_OSK_ERR_NOMEM;
- MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
+ MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n", mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+ MALI_PRINT_ERROR((" can't allocate enough pages! \n"));
goto failed_alloc_pages;
}
}
MALI_DEBUG_ASSERT_POINTER(os_mem);
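/* Refuse the request if it would push the OS allocator past its configured allocation limit */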
if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
- MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+ MALI_PRINT_ERROR(("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
size,
atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
mali_mem_os_allocator.allocation_limit));
#endif
new_page = alloc_page(flags);
-
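+ /* If the first attempt failed, retry once with GFP_KERNEL added to the mask before giving up */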
+ if (new_page == NULL) {
+ new_page = alloc_page(flags | GFP_KERNEL);
+ }
if (unlikely(NULL == new_page)) {
+ MALI_PRINT_ERROR(("alloc_page() return NULL at last! Please check kernel memory!"));
/* Calculate the number of pages actually allocated, and free them. */
#ifdef AML_MALI_DEBUG
MALI_PRINT_ERROR(("alloc_page() return NULL\n"));
err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
if (unlikely(err)) {
- MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+ MALI_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
new_page, err));
__free_page(new_page);
os_mem->count = (page_count - remaining) + i;
atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
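/* Stop the pool trim timer once the page pool has drained below the kernel buffer threshold */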
if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
- MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+// MALI_PRINT_ERROR(("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
}