r4p1-rel0
author Kasin Lee <kasin.li@amlogic.com>
Wed, 6 Aug 2014 11:51:45 +0000 (19:51 +0800)
committer Kasin Lee <kasin.li@amlogic.com>
Wed, 6 Aug 2014 11:51:45 +0000 (19:51 +0800)
Change-Id: Ia5e355b9fc14669d5998f31691a94e35b0d485a8

49 files changed:
mali/common/mali_dlbu.c
mali/common/mali_dlbu.h
mali/common/mali_dma.h
mali/common/mali_gp_job.h
mali/common/mali_gp_scheduler.c
mali/common/mali_group.c
mali/common/mali_kernel_core.c
mali/common/mali_mmu.c
mali/common/mali_mmu.h
mali/common/mali_mmu_page_directory.c
mali/common/mali_mmu_page_directory.h
mali/common/mali_osk.h
mali/common/mali_osk_types.h
mali/common/mali_pp_job.c
mali/common/mali_pp_job.h
mali/common/mali_pp_scheduler.c
mali/common/mali_soft_job.c
mali/common/mali_soft_job.h
mali/common/mali_timeline.c
mali/common/mali_timeline.h
mali/common/mali_ukk.h
mali/include/linux/mali/mali_utgard_ioctl.h
mali/include/linux/mali/mali_utgard_uk_types.h
mali/linux/mali_kernel_linux.c
mali/linux/mali_kernel_sysfs.c
mali/linux/mali_memory.h [changed mode: 0644->0755]
mali/linux/mali_memory_dma_buf.c
mali/linux/mali_memory_external.c
mali/linux/mali_memory_os_alloc.c
mali/linux/mali_memory_os_alloc.h
mali/linux/mali_memory_ump.c
mali/linux/mali_osk_low_level_mem.c
mali/linux/mali_osk_misc.c
mali/linux/mali_osk_profiling.c
mali/linux/mali_osk_specific.h
mali/linux/mali_ukk_core.c [changed mode: 0644->0755]
mali/linux/mali_ukk_gp.c [changed mode: 0644->0755]
mali/linux/mali_ukk_mem.c
mali/linux/mali_ukk_pp.c
mali/linux/mali_ukk_profiling.c
mali/linux/mali_ukk_soft_job.c
mali/linux/mali_ukk_vsync.c [changed mode: 0644->0755]
mali/linux/mali_ukk_wrappers.h [changed mode: 0644->0755]
mali/platform/meson_m450/platform_m8.c
ump/common/ump_kernel_api.c
ump/common/ump_kernel_common.c
ump/common/ump_kernel_ref_drv.c
ump/linux/ump_kernel_random_mapping.c
umplock/umplock_driver.c

mali/common/mali_dlbu.c
index f70a5c7d7ac414763f70329b7d55c0a3ac188334..905deb91f7eff3ea59525064f987919075abcd3a 100755 (executable)
@@ -20,7 +20,7 @@
  */
 #define MALI_DLBU_SIZE 0x400
 
-u32 mali_dlbu_phys_addr = 0;
+mali_dma_addr mali_dlbu_phys_addr = 0;
 static mali_io_address mali_dlbu_cpu_addr = NULL;
 
 /**
mali/common/mali_dlbu.h
index b77657b51bf1f4f7f5172beb99fa172c3428c988..d5436d3d57f9fc333855de9eeacccaa6b14fc59a 100755 (executable)
 
 struct mali_pp_job;
 struct mali_group;
-
-extern u32 mali_dlbu_phys_addr;
-
 struct mali_dlbu_core;
 
+extern mali_dma_addr mali_dlbu_phys_addr;
+
 _mali_osk_errcode_t mali_dlbu_initialize(void);
 void mali_dlbu_terminate(void);
 
mali/common/mali_dma.h
index 61eef113a940c306bf319220c117f1830e24a1ac..1f7ab534443e9b46cb92e0df40064bd19e0dcde8 100755 (executable)
@@ -19,7 +19,7 @@
 
 typedef struct mali_dma_cmd_buf {
        u32 *virt_addr;           /**< CPU address of command buffer */
-       u32 phys_addr;            /**< Physical address of command buffer */
+       mali_dma_addr phys_addr;  /**< Physical address of command buffer */
        u32 size;                 /**< Number of prepared words in command buffer */
 } mali_dma_cmd_buf;
 
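Several hunks in this commit replace bare u32 physical addresses with the mali_dma_addr type. The typedef itself is not shown above; a minimal sketch of what such a type plausibly looks like (an assumption for illustration, keyed off the kernel's usual CONFIG_ARCH_DMA_ADDR_T_64BIT switch):

/* Illustrative only: lets the driver carry 64-bit DMA/physical addresses
 * on LPAE or 64-bit kernels while staying 32 bits wide elsewhere. */
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
typedef u64 mali_dma_addr;
#else
typedef u32 mali_dma_addr;
#endif
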
mali/common/mali_gp_job.h
index f404089068df236ec0da89024d5536f669307bb4..3b525aa5cd70cb16f29673a1ed4033892366c2cd 100755 (executable)
@@ -58,7 +58,7 @@ MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
        return (NULL == job) ? 0 : job->cache_order;
 }
 
-MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
 {
        return job->uargs.user_job_ptr;
 }
mali/common/mali_gp_scheduler.c
index ff973f8736008cfeb5571c23a2978c52a0a4d67a..4a3e9a48fa5e06fe59ec658a7d238e1f2c17ba7c 100755 (executable)
@@ -364,7 +364,7 @@ _mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *u
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+       timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
 
        point = mali_gp_scheduler_submit_job(session, job);
 
@@ -379,7 +379,8 @@ _mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *u
 _mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
        args->number_of_cores = 1;
        return _MALI_OSK_ERR_OK;
 }
@@ -387,28 +388,19 @@ _mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_c
 _mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
        args->version = gp_version;
        return _MALI_OSK_ERR_OK;
 }
 
 _mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
 {
-       struct mali_session_data *session;
        struct mali_gp_job *resumed_job;
        _mali_osk_notification_t *new_notification = NULL;
 
        MALI_DEBUG_ASSERT_POINTER(args);
 
-       if (NULL == args->ctx) {
-               return _MALI_OSK_ERR_INVALID_ARGS;
-       }
-
-       session = (struct mali_session_data *)args->ctx;
-       if (NULL == session) {
-               return _MALI_OSK_ERR_FAULT;
-       }
-
        if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
                new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
 
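The casts added above follow one pattern: user-space pointers are carried in the uk argument structs as 64-bit integers so the ioctl layout is identical for 32-bit and 64-bit userspace, and converting them back to a kernel-visible user pointer goes through uintptr_t to avoid integer-to-pointer size warnings on 32-bit builds. A minimal sketch of the pattern ('job' and 'point' as in _mali_ukk_gp_start_job() above; the plain put_user() call stands in for the driver's own wrapper and is only illustrative):

/* uargs->timeline_point_ptr is a u64 in the ioctl struct, regardless of
 * the actual userspace pointer width. */
u64 raw = job->uargs.timeline_point_ptr;

/* Narrow to the native pointer width first, then reinterpret as a user
 * pointer; casting u64 straight to a pointer warns on 32-bit kernels. */
u32 __user *timeline_point_ptr = (u32 __user *)(uintptr_t)raw;

if (0 != put_user((u32)point, timeline_point_ptr))
        return _MALI_OSK_ERR_FAULT;
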
mali/common/mali_group.c
index efa9d54a37e3fd57f0672268c89a943c60e9ec23..2796c12c487d8b581cb705b8ce6899485fb28b31 100755 (executable)
@@ -590,7 +590,7 @@ static void mali_group_job_prepare_virtual(struct mali_group *group, struct mali
                                 * remove it from broadcast
                                 */
                                mali_bcast_remove_group(group->bcast_core, child);
-                               MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %0x remove from bcast_core\n", (int)child));
+                               MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %p remove from bcast_core\n", child));
                        }
                }
 
@@ -1344,6 +1344,11 @@ _mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
                        MALI_SUCCESS;
        }
 #endif
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+               goto out;
+       }
+#endif
 
        /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
        int_stat = mali_mmu_get_int_status(mmu);
@@ -1397,11 +1402,12 @@ static void mali_group_bottom_half_mmu(void *data)
                /* An actual page fault has occurred. */
 #ifdef DEBUG
                u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
-               MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
-                                    (void *)fault_address,
+               MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%08x from bus id %d of type %s on %s\n",
+                                    fault_address,
                                     (status >> 6) & 0x1F,
                                     (status & 32) ? "write" : "read",
                                     mmu->hw_core.description));
+               mali_mmu_pagedir_diag(group->session->page_directory, fault_address);
 #endif
 
                mali_group_mmu_page_fault_and_unlock(group);
mali/common/mali_kernel_core.c
index 770cf662119deadc44d414e5d8ea36d47f49bc1b..66603357ebfdd2d0c5e6130989bc8450632cc18e 100755 (executable)
@@ -1167,7 +1167,25 @@ u32 mali_kernel_core_get_gpu_minor_version(void)
 _mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       /* check compatability */
+       if (args->version == _MALI_UK_API_VERSION) {
+               args->compatible = 1;
+       } else {
+               args->compatible = 0;
+       }
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
        /* check compatability */
        if (args->version == _MALI_UK_API_VERSION) {
@@ -1187,12 +1205,14 @@ _mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notificati
        _mali_osk_errcode_t err;
        _mali_osk_notification_t *notification;
        _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
 
        /* check input */
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
 
        /* if the queue does not exist we're currently shutting down */
        if (NULL == queue) {
@@ -1221,12 +1241,14 @@ _mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *ar
 {
        _mali_osk_notification_t *notification;
        _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
 
        /* check input */
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
 
        /* if the queue does not exist we're currently shutting down */
        if (NULL == queue) {
@@ -1250,9 +1272,9 @@ _mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priori
        struct mali_session_data *session;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       session = (struct mali_session_data *) args->ctx;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
 
        if (!session->use_high_priority_job_queue) {
                session->use_high_priority_job_queue = MALI_TRUE;
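The new _mali_ukk_get_api_version_v2() mirrors the v1 handler; its argument struct is defined in mali_utgard_uk_types.h (listed among the changed files, diff not shown here). A plausible sketch of that layout, under the assumption that v2 only widens the context handle to a fixed 64 bits so 32-bit and 64-bit userspace share one ioctl layout:

/* Assumed layout for illustration; the authoritative definition is in
 * mali/include/linux/mali/mali_utgard_uk_types.h. */
typedef struct {
	u64 ctx;        /* [in,out] user-kernel context, carried as a fixed-width integer */
	u32 version;    /* [in,out] user-side API version in, kernel API version out      */
	u32 compatible; /* [out] non-zero if the two versions are compatible              */
} _mali_uk_get_api_version_v2_s;
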
mali/common/mali_mmu.c
index 81754a00b306a5ea58c8ca55e5ae9c499ff4701e..585e803fa38d14fbad16e1dd67f92ee816dda51d 100755 (executable)
@@ -45,15 +45,15 @@ MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *
 
 /* page fault queue flush helper pages
  * note that the mapping pointers are currently unused outside of the initialization functions */
-static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
-static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
-static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
 
 /* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
-static u32 mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
+static mali_dma_addr mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
 static mali_io_address mali_empty_page_directory_virt = NULL;
 
 
@@ -93,9 +93,12 @@ void mali_mmu_terminate(void)
        mali_empty_page_directory_virt = NULL;
 
        /* Free the page fault flush pages */
-       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping,
-                                      &mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping,
-                                      &mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                                      &mali_page_fault_flush_page_directory_mapping,
+                                      &mali_page_fault_flush_page_table,
+                                      &mali_page_fault_flush_page_table_mapping,
+                                      &mali_page_fault_flush_data_page,
+                                      &mali_page_fault_flush_data_page_mapping);
 }
 
 struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
@@ -106,9 +109,8 @@ struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mal
 
        MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
 
-       mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
-       if (NULL != mmu)
-       {
+       mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+       if (NULL != mmu) {
                if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
                        if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
                                if (is_virtual) {
mali/common/mali_mmu.h
index 0d89e2a65da8b018c77df98b3f8f0883a952e8ae..f3040da219aebdf8376fa8d7b4846b657586895c 100755 (executable)
 #include "mali_mmu_page_directory.h"
 #include "mali_hw_core.h"
 
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <mach/am_regs.h>
-#include <linux/module.h>
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-#include "meson_m400/mali_fix.h"
-#endif
-
-
 /* Forward declaration from mali_group.h */
 struct mali_group;
 
mali/common/mali_mmu_page_directory.c
index b11bbe94d30e7ab6d8efd6a43542e1c233fd9215..3e0d9d40ad246abde0009a7410430658af333c86 100755 (executable)
@@ -22,7 +22,7 @@ u32 mali_allocate_empty_page(mali_io_address *virt_addr)
 {
        _mali_osk_errcode_t err;
        mali_io_address mapping;
-       u32 address;
+       mali_dma_addr address;
 
        if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
                /* Allocation failed */
@@ -43,16 +43,17 @@ u32 mali_allocate_empty_page(mali_io_address *virt_addr)
        return address;
 }
 
-void mali_free_empty_page(u32 address, mali_io_address virt_addr)
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
 {
        if (MALI_INVALID_PAGE != address) {
                mali_mmu_release_table_page(address, virt_addr);
        }
 }
 
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-               u32 *page_table, mali_io_address *page_table_mapping,
-               u32 *data_page, mali_io_address *data_page_mapping)
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping)
 {
        _mali_osk_errcode_t err;
 
@@ -76,9 +77,10 @@ _mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_a
        return err;
 }
 
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-                                   u32 *page_table, mali_io_address *page_table_mapping,
-                                   u32 *data_page, mali_io_address *data_page_mapping)
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping)
 {
        if (MALI_INVALID_PAGE != *page_directory) {
                mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
@@ -117,15 +119,15 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
        const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
        _mali_osk_errcode_t err;
        mali_io_address pde_mapping;
-       u32 pde_phys;
+       mali_dma_addr pde_phys;
        int i;
 
-       if (last_pde < first_pde) {
-               MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
-       }
+       if (last_pde < first_pde)
+               return _MALI_OSK_ERR_INVALID_ARGS;
 
        for (i = first_pde; i <= last_pde; i++) {
-               if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+               if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
                        /* Page table not present */
                        MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
                        MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
@@ -149,7 +151,7 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
        }
        _mali_osk_write_mem_barrier();
 
-       MALI_SUCCESS;
+       return _MALI_OSK_ERR_OK;
 }
 
 MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
@@ -163,6 +165,13 @@ MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_a
        }
 }
 
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                      index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
 {
        const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
@@ -246,17 +255,22 @@ _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir,
 struct mali_page_directory *mali_mmu_pagedir_alloc(void)
 {
        struct mali_page_directory *pagedir;
+       _mali_osk_errcode_t err;
+       mali_dma_addr phys;
 
        pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
        if (NULL == pagedir) {
                return NULL;
        }
 
-       if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
+       err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+       if (_MALI_OSK_ERR_OK != err) {
                _mali_osk_free(pagedir);
                return NULL;
        }
 
+       pagedir->page_directory = (u32)phys;
+
        /* Zero page directory */
        fill_page(pagedir->page_directory_mapped, 0);
 
@@ -270,8 +284,11 @@ void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
 
        /* Free referenced page tables and zero PDEs. */
        for (i = 0; i < num_page_table_entries; i++) {
-               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
-                       u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+                               pagedir->page_directory_mapped,
+                               sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+                       mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                            i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
                        _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
                        mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
                }
@@ -285,22 +302,54 @@ void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
 }
 
 
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits)
 {
        u32 end_address = mali_address + size;
+       u32 mali_phys = (u32)phys_address;
 
        /* Map physical pages into MMU page tables */
-       for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
+       for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
                MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
                _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
                                                MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
-                                               phys_address | permission_bits);
+                                               mali_phys | permission_bits);
        }
 }
 
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
 {
-       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+#if defined(DEBUG)
+       u32 pde_index, pte_index;
+       u32 pde, pte;
+
+       pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+       pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+       pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                    pde_index * sizeof(u32));
+
+
+       if (pde & MALI_MMU_FLAGS_PRESENT) {
+               u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+               pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+                                            pte_index * sizeof(u32));
+
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+                                    "\t\tPTE: %08x, page %08x is %s\n",
+                                    fault_addr, pte_addr, pte,
+                                    MALI_MMU_ENTRY_ADDRESS(pte),
+                                    pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+       } else {
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+                                    fault_addr, pde));
+       }
+#else
+       MALI_IGNORE(pagedir);
+       MALI_IGNORE(fault_addr);
+#endif
 }
 
 /* For instrumented */
@@ -399,10 +448,9 @@ _mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_
        struct dump_info info = { 0, 0, 0, NULL };
        struct mali_session_data *session_data;
 
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session_data = (struct mali_session_data *)(args->ctx);
 
        MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
        MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
@@ -416,18 +464,17 @@ _mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s
        struct mali_session_data *session_data;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-       MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
 
-       session_data = (struct mali_session_data *)(args->ctx);
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
 
        info.buffer_left = args->size;
-       info.buffer = args->buffer;
+       info.buffer = (u32 *)(uintptr_t)args->buffer;
 
-       args->register_writes = info.buffer;
+       args->register_writes = (uintptr_t)info.buffer;
        MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
 
-       args->page_table_dump = info.buffer;
+       args->page_table_dump = (uintptr_t)info.buffer;
        MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
 
        args->register_writes_size = info.register_writes_size;
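mali_mmu_pagedir_diag() above indexes the two-level MMU tables through the MALI_MMU_PDE_ENTRY and MALI_MMU_PTE_ENTRY macros, which are not part of this diff. For orientation, a sketch of what they amount to on a Mali Utgard MMU (1024-entry directory, 1024-entry tables, 4 KiB pages; the exact masks are an assumption here):

/* Illustrative 10/10/12 split of a Mali virtual address; flag bits live in
 * the low bits of each entry, which is why ~MALI_MMU_FLAGS_MASK is applied
 * before treating an entry as an address. */
#define MALI_MMU_PDE_ENTRY(addr)  (((addr) >> 22) & 0x3FF)  /* page-directory index */
#define MALI_MMU_PTE_ENTRY(addr)  (((addr) >> 12) & 0x3FF)  /* page-table index     */
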
mali/common/mali_mmu_page_directory.h
index c49e93ea4f07a9d96b7ceb05e309ce8fb06565d5..a53fee966b5a785bb09e89f1f2e7851447eb3929 100755 (executable)
@@ -88,20 +88,23 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
 
 /* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
-
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits);
 
 u32 mali_allocate_empty_page(mali_io_address *virtual);
-void mali_free_empty_page(u32 address, mali_io_address virtual);
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-               u32 *page_table, mali_io_address *page_table_mapping,
-               u32 *data_page, mali_io_address *data_page_mapping);
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-                                   u32 *page_table, mali_io_address *page_table_mapping,
-                                   u32 *data_page, mali_io_address *data_page_mapping);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping);
 
 struct mali_page_directory *mali_mmu_pagedir_alloc(void);
 void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
 
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
 #endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
mali/common/mali_osk.h
index edff2760d0d2829507ef5ef9e7920bd0b7691dfb..d5c9651735a35dfe09c0f11bc48d18f9d90fc679 100755 (executable)
@@ -1237,6 +1237,17 @@ void _mali_osk_dbgmsg(const char *fmt, ...);
  */
 u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
 
+/** @brief Print fmt into print_ctx.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param print_ctx a pointer to the result file buffer
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...);
+
 /** @brief Abnormal process abort.
  *
  * Terminates the caller-process if this function is called.
mali/common/mali_osk_types.h
index feeefb92b34bd195a238763a6cfd02d0143680e0..cb757e877b8b9486c5900d4e5dfd0506a76c9491 100755 (executable)
@@ -448,6 +448,10 @@ typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
 
 /** @} */ /* end group uddapi */
 
+/** @brief Mali print ctx type which uses seq_file
+  */
+typedef struct seq_file _mali_osk_print_ctx;
+
 #ifdef __cplusplus
 }
 #endif
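Together, the _mali_osk_ctxprintf() declaration in mali_osk.h and the _mali_osk_print_ctx typedef above let the timeline debug code print into a seq_file instead of the kernel log. The Linux-side body lives in mali/linux/mali_osk_misc.c (in the changed-file list, diff not shown); a minimal sketch of what it can look like, assuming it simply formats into a scratch buffer and forwards to seq_printf():

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...)
{
	va_list args;
	char buf[512];

	/* Format into a local buffer, then hand the result to the seq_file. */
	va_start(args, fmt);
	vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	seq_printf(print_ctx, "%s", buf);
}
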
mali/common/mali_pp_job.c
index f000df287c75506fe54c964d032c499f4412d6c2..672766c5064463a634a71e4ece4caa8a52a8bd03 100755 (executable)
@@ -36,7 +36,8 @@ void mali_pp_job_terminate(void)
        _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
 }
 
-struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+                                      _mali_uk_pp_start_job_s __user *uargs, u32 id)
 {
        struct mali_pp_job *job;
        u32 perf_counter_flag;
@@ -89,13 +90,14 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_
                job->num_memory_cookies = job->uargs.num_memory_cookies;
                if (job->num_memory_cookies > 0) {
                        u32 size;
+                       u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
 
                        if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
                                MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
                                goto fail;
                        }
 
-                       size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+                       size = sizeof(*memory_cookies) * job->num_memory_cookies;
 
                        job->memory_cookies = _mali_osk_malloc(size);
                        if (NULL == job->memory_cookies) {
@@ -103,7 +105,7 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_
                                goto fail;
                        }
 
-                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
+                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
                                MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
                                goto fail;
                        }
mali/common/mali_pp_job.h
index 978eadd749c347bd09872cc8d61b8354d6410de4..a5a9a59a8727cb4a83dcbeef8b1fb76ca835ca2a 100755 (executable)
@@ -89,7 +89,7 @@ MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
        return (NULL == job) ? 0 : job->cache_order;
 }
 
-MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
 {
        return job->uargs.user_job_ptr;
 }
mali/common/mali_pp_scheduler.c
index 58ecd12af1046d2c5a21ef8637f86465d6685030..d77f425175c78b456173d9556766cbd32180e316 100755 (executable)
@@ -48,7 +48,7 @@ struct mali_pp_scheduler_job_queue {
 #endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
 
 static void mali_pp_scheduler_job_queued(void);
-static void mali_pp_scheduler_job_completed(void);
+static void mali_pp_scheduler_job_completed(mali_bool job_started);
 
 /* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
 #define MALI_MAX_NUMBER_OF_PP_GROUPS 9
@@ -638,7 +638,7 @@ static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_b
 #endif
 }
 
-static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job)
+static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job, mali_bool job_started)
 {
        /* This job object should not be on any lists. */
        MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
@@ -658,7 +658,7 @@ static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job)
        }
 #endif
 
-       mali_pp_scheduler_job_completed();
+       mali_pp_scheduler_job_completed(job_started);
 }
 
 void mali_pp_scheduler_schedule(void)
@@ -1097,7 +1097,7 @@ void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *jo
 
                if (job_is_done) {
                        /* Return job to user and delete it. */
-                       mali_pp_scheduler_finalize_job(job);
+                       mali_pp_scheduler_finalize_job(job, MALI_TRUE);
                }
 
                /* A GP job might be queued by tracker release above,
@@ -1170,7 +1170,7 @@ void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *jo
 
        if (job_is_done) {
                /* Return job to user and delete it. */
-               mali_pp_scheduler_finalize_job(job);
+               mali_pp_scheduler_finalize_job(job, MALI_TRUE);
        }
 }
 
@@ -1243,7 +1243,7 @@ _mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *u
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+       timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
 
        point = mali_pp_scheduler_submit_job(session, job);
        job = NULL;
@@ -1264,6 +1264,8 @@ _mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_
        struct mali_gp_job *gp_job;
        u32 __user *timeline_point_ptr = NULL;
        mali_timeline_point point;
+       _mali_uk_pp_start_job_s __user *pp_args;
+       _mali_uk_gp_start_job_s __user *gp_args;
 
        MALI_DEBUG_ASSERT_POINTER(ctx);
        MALI_DEBUG_ASSERT_POINTER(uargs);
@@ -1274,20 +1276,23 @@ _mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       pp_job = mali_pp_job_create(session, kargs.pp_args, mali_scheduler_get_new_id());
+       pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+       gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+       pp_job = mali_pp_job_create(session, pp_args, mali_scheduler_get_new_id());
        if (NULL == pp_job) {
                MALI_PRINT_ERROR(("Failed to create PP job.\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       gp_job = mali_gp_job_create(session, kargs.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
+       gp_job = mali_gp_job_create(session, gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
        if (NULL == gp_job) {
                MALI_PRINT_ERROR(("Failed to create GP job.\n"));
                mali_pp_job_delete(pp_job);
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       timeline_point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;
+       timeline_point_ptr = (u32 __user *)(uintptr_t)pp_job->uargs.timeline_point_ptr;
 
        /* Submit GP job. */
        mali_gp_scheduler_submit_job(session, gp_job);
@@ -1308,9 +1313,10 @@ _mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_
 _mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
        args->number_of_total_cores = num_cores;
        args->number_of_enabled_cores = enabled_cores;
+
        return _MALI_OSK_ERR_OK;
 }
 
@@ -1327,8 +1333,9 @@ u32 mali_pp_scheduler_get_num_cores_enabled(void)
 _mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
        args->version = pp_version;
+
        return _MALI_OSK_ERR_OK;
 }
 
@@ -1339,10 +1346,10 @@ void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
        struct mali_pp_job *tmp;
        u32 fb_lookup_id;
 
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
 
-       session = (struct mali_session_data *)args->ctx;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(args);
 
        fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
 
@@ -1432,7 +1439,7 @@ void mali_pp_scheduler_abort_session(struct mali_session_data *session)
        _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
                mali_timeline_tracker_release(&job->tracker);
                mali_pp_job_delete(job);
-               mali_pp_scheduler_job_completed();
+               mali_pp_scheduler_job_completed(MALI_TRUE);
        }
 
        /* Abort any running jobs from the session. */
@@ -1786,7 +1793,7 @@ static void mali_pp_scheduler_notify_core_change(u32 num_cores)
 
 static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
 {
-       MALI_DEBUG_PRINT(3, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+       MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
 
        _mali_osk_pm_dev_ref_add_no_power_on();
        _mali_osk_pm_dev_barrier();
@@ -1824,7 +1831,7 @@ static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
 
 static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
 {
-       MALI_DEBUG_PRINT(3, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+       MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
 
        mali_pp_scheduler_suspend();
 
@@ -1959,12 +1966,12 @@ static void mali_pp_scheduler_job_queued(void)
        }
 }
 
-static void mali_pp_scheduler_job_completed(void)
+static void mali_pp_scheduler_job_completed(mali_bool job_started)
 {
        /* Release the PM reference we got in the mali_pp_scheduler_job_queued() function */
        _mali_osk_pm_dev_ref_dec();
 
-       if (mali_utilization_enabled()) {
+       if (mali_utilization_enabled() && job_started) {
                mali_utilization_pp_end();
        }
 }
@@ -2106,7 +2113,7 @@ mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
                mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
 
                mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
-               mali_pp_scheduler_finalize_job(job);
+               mali_pp_scheduler_finalize_job(job, MALI_FALSE);
 
                return MALI_SCHEDULER_MASK_EMPTY;
        }
mali/common/mali_soft_job.c
index f7020f0180d48c12cf54891b6e2cf3061203efec..91dada9b636ef302cc7277784ae8ecad072850f1 100755 (executable)
@@ -48,9 +48,7 @@ MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_
 
 struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
 {
-       u32 i;
        struct mali_soft_job_system *system;
-       struct mali_soft_job *job;
 
        MALI_DEBUG_ASSERT_POINTER(session);
 
@@ -67,18 +65,10 @@ struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_dat
                return NULL;
        }
        system->lock_owner = 0;
+       system->last_job_id = 0;
 
-       _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
        _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
 
-       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
-               job = &(system->jobs[i]);
-               _mali_osk_list_add(&(job->system_list), &(system->jobs_free));
-               job->system = system;
-               job->state = MALI_SOFT_JOB_STATE_FREE;
-               job->id = i;
-       }
-
        return system;
 }
 
@@ -87,16 +77,7 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
        MALI_DEBUG_ASSERT_POINTER(system);
 
        /* All jobs should be free at this point. */
-       MALI_DEBUG_CODE({
-               u32 i;
-               struct mali_soft_job *job;
-
-               for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i)
-               {
-                       job = &(system->jobs[i]);
-                       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
-               }
-       });
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
 
        if (NULL != system) {
                if (NULL != system->lock) {
@@ -106,31 +87,6 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
        }
 }
 
-static struct mali_soft_job *mali_soft_job_system_alloc_job(struct mali_soft_job_system *system)
-{
-       struct mali_soft_job *job;
-
-       MALI_DEBUG_ASSERT_POINTER(system);
-       MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
-
-       if (_mali_osk_list_empty(&(system->jobs_free))) {
-               /* No jobs available. */
-               return NULL;
-       }
-
-       /* Grab first job and move it to the used list. */
-       job = _MALI_OSK_LIST_ENTRY(system->jobs_free.next, struct mali_soft_job, system_list);
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
-
-       _mali_osk_list_move(&(job->system_list), &(system->jobs_used));
-       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
-
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
-       MALI_DEBUG_ASSERT(system == job->system);
-
-       return job;
-}
-
 static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
 {
        MALI_DEBUG_ASSERT_POINTER(job);
@@ -138,23 +94,26 @@ static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, s
 
        mali_soft_job_system_lock(job->system);
 
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE != job->state);
        MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
        MALI_DEBUG_ASSERT(system == job->system);
 
-       job->state = MALI_SOFT_JOB_STATE_FREE;
-       _mali_osk_list_move(&(job->system_list), &(system->jobs_free));
+       _mali_osk_list_del(&(job->system_list));
 
        mali_soft_job_system_unlock(job->system);
+
+       _mali_osk_free(job);
 }
 
 MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
 {
+       struct mali_soft_job *job, *tmp;
+
        MALI_DEBUG_ASSERT_POINTER(system);
        MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
 
-       if (job_id < MALI_MAX_NUM_SOFT_JOBS) {
-               return &system->jobs[job_id];
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               if (job->id == job_id)
+                       return job;
        }
 
        return NULL;
@@ -181,39 +140,40 @@ void mali_soft_job_destroy(struct mali_soft_job *job)
        }
 }
 
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job)
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
 {
        struct mali_soft_job *job;
        _mali_osk_notification_t *notification = NULL;
 
        MALI_DEBUG_ASSERT_POINTER(system);
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_TYPE_USER_SIGNALED >= type);
+       MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+                         (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
-               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
-               if (unlikely(NULL == notification)) {
-                       MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
-                       return NULL;
-               }
+       notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+       if (unlikely(NULL == notification)) {
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+               return NULL;
        }
 
-       mali_soft_job_system_lock(system);
-
-       job = mali_soft_job_system_alloc_job(system);
-       if (NULL == job) {
-               mali_soft_job_system_unlock(system);
-               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate job"));
-               _mali_osk_notification_delete(notification);
+       job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+       if (unlikely(NULL == job)) {
+               MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed. \n"));
                return NULL;
        }
 
+       mali_soft_job_system_lock(system);
+
+       job->system = system;
+       job->id = system->last_job_id++;
+       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+       _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
        job->type = type;
        job->user_job = user_job;
        job->activated = MALI_FALSE;
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
-               job->activated_notification = notification;
-       }
+       job->activated_notification = notification;
 
        _mali_osk_atomic_init(&job->refcount, 1);
 
@@ -277,7 +237,8 @@ _mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system
 
        job = mali_soft_job_system_lookup_job(system, job_id);
 
-       if (NULL == job || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+       if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+           || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
                mali_soft_job_system_unlock(system);
                MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
                return _MALI_OSK_ERR_ITEM_NOT_FOUND;
@@ -354,9 +315,25 @@ void mali_soft_job_system_activate_job(struct mali_soft_job *job)
 
        /* Wake up sleeping signaler. */
        job->activated = MALI_TRUE;
-       _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
 
-       mali_soft_job_system_unlock(job->system);
+       /* If job type is self signaled, release tracker, move soft job to free list, and scheduler at once */
+       if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+               mali_scheduler_mask schedule_mask;
+
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+               mali_soft_job_system_unlock(job->system);
+
+               schedule_mask = mali_timeline_tracker_release(&job->tracker);
+               mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+               mali_soft_job_destroy(job);
+       } else {
+               _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+               mali_soft_job_system_unlock(job->system);
+       }
 }
 
 mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
@@ -408,7 +385,6 @@ mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
 
 void mali_soft_job_system_abort(struct mali_soft_job_system *system)
 {
-       u32 i;
        struct mali_soft_job *job, *tmp;
        _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
 
@@ -420,11 +396,8 @@ void mali_soft_job_system_abort(struct mali_soft_job_system *system)
 
        mali_soft_job_system_lock(system);
 
-       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
-               job = &(system->jobs[i]);
-
-               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE      == job->state ||
-                                 MALI_SOFT_JOB_STATE_STARTED   == job->state ||
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED   == job->state ||
                                  MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
 
                if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
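The soft-job system above moves from a fixed MALI_MAX_NUM_SOFT_JOBS pool to per-job allocation, adds a monotonically increasing last_job_id, and introduces the self-signaled type that frees itself on activation. A short usage sketch under those semantics (illustrative only; function name is hypothetical and the start/attach step is handled elsewhere in the driver):

/* Create a self-signaled soft job with the new 64-bit user_job handle.
 * Once such a job is activated it releases its Timeline tracker and
 * destroys itself, so the caller must not dereference it afterwards. */
static _mali_osk_errcode_t queue_self_signaled_job(struct mali_soft_job_system *system,
						   u64 user_job)
{
	struct mali_soft_job *job;

	job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_SELF_SIGNALED, user_job);
	if (NULL == job)
		return _MALI_OSK_ERR_NOMEM;

	/* ... the job is subsequently started and attached to the Timeline
	 * system by the usual soft-job start path ... */
	return _MALI_OSK_ERR_OK;
}
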
mali/common/mali_soft_job.h
index 2afa3b97ca3918088a2706f20ab9f23114b193a5..77711b1bd3ed7c9a879e1381265f7e5c43ec5877 100755 (executable)
@@ -26,17 +26,18 @@ struct mali_soft_job_system;
  * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
  * they are signaled by user-space (@ref mali_soft_job_system_signaled_job) or if they are timed out
  * by the Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release job resource automatically
+ * in kernel when the job is activated.
  */
 typedef enum mali_soft_job_type {
+       MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
        MALI_SOFT_JOB_TYPE_USER_SIGNALED,
 } mali_soft_job_type;
 
 /**
  * Soft job state.
  *
- * All soft jobs in a soft job system will initially be in state MALI_SOFT_JOB_STATE_FREE.  On @ref
- * mali_soft_job_system_start_job a job will first be allocated.  A job in state
- * MALI_SOFT_JOB_STATE_FREE will be picked and the state changed to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * mali_soft_job_system_start_job a job will first be allocated.The job's state set to MALI_SOFT_JOB_STATE_ALLOCATED.
  * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
  *
  * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
@@ -47,11 +48,8 @@ typedef enum mali_soft_job_type {
  * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT.  This can only happen to soft jobs in state
  * MALI_SOFT_JOB_STATE_STARTED.
  *
- * When a soft job's reference count reaches zero, it will be freed and the state returns to
- * MALI_SOFT_JOB_STATE_FREE.
  */
 typedef enum mali_soft_job_state {
-       MALI_SOFT_JOB_STATE_FREE,
        MALI_SOFT_JOB_STATE_ALLOCATED,
        MALI_SOFT_JOB_STATE_STARTED,
        MALI_SOFT_JOB_STATE_SIGNALED,
@@ -60,9 +58,6 @@ typedef enum mali_soft_job_state {
 
 #define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
 
-/* Maximum number of soft jobs per soft system. */
-#define MALI_MAX_NUM_SOFT_JOBS 20
-
 /**
  * Soft job struct.
  *
@@ -70,7 +65,7 @@ typedef enum mali_soft_job_state {
  */
 typedef struct mali_soft_job {
        mali_soft_job_type            type;                   /**< Soft job type.  Must be one of MALI_SOFT_JOB_TYPE_*. */
-       u32                           user_job;               /**< Identifier for soft job in user space. */
+       u64                           user_job;               /**< Identifier for soft job in user space. */
        _mali_osk_atomic_t            refcount;               /**< Soft jobs are reference counted to prevent premature deletion. */
        struct mali_timeline_tracker  tracker;                /**< Timeline tracker for soft job. */
        mali_bool                     activated;              /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
@@ -90,13 +85,11 @@ typedef struct mali_soft_job {
  */
 typedef struct mali_soft_job_system {
        struct mali_session_data *session;                    /**< The session this soft job system belongs to. */
-
-       struct mali_soft_job jobs[MALI_MAX_NUM_SOFT_JOBS];    /**< Array of all soft jobs in this system. */
-       _MALI_OSK_LIST_HEAD(jobs_free);                       /**< List of all free soft jobs. */
        _MALI_OSK_LIST_HEAD(jobs_used);                       /**< List of all allocated soft jobs. */
 
        _mali_osk_spinlock_irq_t *lock;                       /**< Lock used to protect soft job system and its soft jobs. */
        u32 lock_owner;                                       /**< Contains tid of thread that locked the system or 0, if not locked. */
+       u32 last_job_id;                                      /**< Recored the last job id protected by lock. */
 } mali_soft_job_system;
 
 /**
@@ -125,7 +118,7 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
  * @param user_job Identifier for soft job in user space.
  * @return New soft job if successful, NULL if not.
  */
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job);
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
 
 /**
  * Destroy soft job.
mali/common/mali_timeline.c
index a3cdaf85018305d5aef7d20adce84c2239636df8..21fec0036d3f755bad02c5aae9ee14ae88a6ded9 100755 (executable)
@@ -47,7 +47,7 @@ static void put_sync_fences(struct work_struct *ignore)
        hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
        spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
 
-       hlist_for_each_entry_safe(o, pos, tmp, &mali_timeline_sync_fence_to_free_list, list) {
+       hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
                sync_fence_put(o->fence);
                kfree(o);
        }
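The one-line fix above matters because hlist_move_list() transfers every node onto the destination head and re-initialises the source, so iterating the global mali_timeline_sync_fence_to_free_list afterwards walks an empty list and the deferred sync fences are never put. A self-contained sketch of the corrected pattern, with hypothetical names standing in for the driver's own structures and assuming the Android sync framework headers are available:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pending_fence {
	struct sync_fence *fence;      /* fence whose reference we still hold */
	struct hlist_node list;
};

static HLIST_HEAD(pending);            /* filled from atomic context */
static DEFINE_SPINLOCK(pending_lock);  /* protects 'pending'         */

static void put_pending_fences(void)
{
	struct pending_fence *o;
	struct hlist_node *pos, *tmp;
	HLIST_HEAD(local);
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	hlist_move_list(&pending, &local);   /* 'pending' is empty after this */
	spin_unlock_irqrestore(&pending_lock, flags);

	/* Walk the head the entries were moved to, not the global one.  The
	 * five-argument hlist_for_each_entry_safe() matches the pre-3.9 hlist
	 * API used in the hunk above. */
	hlist_for_each_entry_safe(o, pos, tmp, &local, list) {
		sync_fence_put(o->fence);
		kfree(o);
	}
}
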
@@ -1299,8 +1299,12 @@ static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, e
        MALI_DEBUG_ASSERT_POINTER(timeline->system);
        system = timeline->system;
 
-       if (MALI_TIMELINE_MAX > id) {
-               return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+       if (MALI_TIMELINE_MAX > id ) {
+               if(MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+                       return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+               } else {
+                       return MALI_FALSE;
+               }
        } else {
                MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
                return MALI_FALSE;
@@ -1311,9 +1315,9 @@ static const char *timeline_id_to_string(enum mali_timeline_id id)
 {
        switch (id) {
        case MALI_TIMELINE_GP:
-               return "  GP";
+               return "GP";
        case MALI_TIMELINE_PP:
-               return "  PP";
+               return "PP";
        case MALI_TIMELINE_SOFT:
                return "SOFT";
        default:
@@ -1325,9 +1329,9 @@ static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_ty
 {
        switch (type) {
        case MALI_TIMELINE_TRACKER_GP:
-               return "  GP";
+               return "GP";
        case MALI_TIMELINE_TRACKER_PP:
-               return "  PP";
+               return "PP";
        case MALI_TIMELINE_TRACKER_SOFT:
                return "SOFT";
        case MALI_TIMELINE_TRACKER_WAIT:
@@ -1361,54 +1365,68 @@ mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_ti
        return MALI_TIMELINE_TS_FINISH;
 }
 
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker)
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
 {
        const char *tracker_state = "IWAF";
+       char state_char = 'I';
+       char tracker_type[32] = {0};
 
        MALI_DEBUG_ASSERT_POINTER(tracker);
 
+       state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+       _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
        if (0 != tracker->trigger_ref_count) {
-               MALI_PRINTF(("TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u,%d]  (0x%08X)\n",
-                            timeline_tracker_type_to_string(tracker->type), tracker->point,
-                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
-                            tracker->trigger_ref_count,
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
-                            tracker->fence.sync_fd, tracker->job));
+#if defined(CONFIG_SYNC)
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u, fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                               tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+                               tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
        } else {
-               MALI_PRINTF(("TL:  %s %u %c  (0x%08X)\n",
-                            timeline_tracker_type_to_string(tracker->type), tracker->point,
-                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
-                            tracker->job));
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                               tracker_type, tracker->point, state_char,
+                               tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
        }
+#else
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u]  job:(0x%08X)\n",
+                               tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+                               is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+                               tracker->job);
+       } else {
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  job:(0x%08X)\n",
+                               tracker_type, tracker->point, state_char,
+                               tracker->job);
+       }
+#endif
 }
 
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline)
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
 {
        struct mali_timeline_tracker *tracker = NULL;
-       int i_max = 30;
 
        MALI_DEBUG_ASSERT_POINTER(timeline);
 
        tracker = timeline->tracker_tail;
-       while (NULL != tracker && 0 < --i_max) {
-               mali_timeline_debug_print_tracker(tracker);
+       while (NULL != tracker) {
+               mali_timeline_debug_print_tracker(tracker, print_ctx);
                tracker = tracker->timeline_next;
        }
-
-       if (0 == i_max) {
-               MALI_PRINTF(("TL: Too many trackers in list to print\n"));
-       }
 }
 
-void mali_timeline_debug_print_system(struct mali_timeline_system *system)
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
 {
        int i;
        int num_printed = 0;
+       u32 tid = _mali_osk_get_tid();
 
        MALI_DEBUG_ASSERT_POINTER(system);
 
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
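+       /* The timeline system lock is held for the whole dump so trackers
+        * cannot be added to or removed from the per-timeline lists while
+        * they are being printed. */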
+
        /* Print all timelines */
        for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
                struct mali_timeline *timeline = system->timelines[i];
@@ -1417,15 +1435,18 @@ void mali_timeline_debug_print_system(struct mali_timeline_system *system)
 
                if (NULL == timeline->tracker_head) continue;
 
-               MALI_PRINTF(("TL: Timeline %s:\n",
-                            timeline_id_to_string((enum mali_timeline_id)i)));
-               mali_timeline_debug_print_timeline(timeline);
+               _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+                               timeline_id_to_string((enum mali_timeline_id)i));
+
+               mali_timeline_debug_print_timeline(timeline, print_ctx);
                num_printed++;
        }
 
        if (0 == num_printed) {
-               MALI_PRINTF(("TL: All timelines empty\n"));
+               _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
        }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
 }
 
 #endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
index 141b9d50c6e5afa1054daaeb00dd2e37b303a244..646501eb39279e39cc09e7288df58667c1c427ab 100755 (executable)
@@ -475,21 +475,21 @@ mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_ti
  *
  * @param tracker Tracker to print.
  */
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
 
 /**
  * Print debug information about timeline.
  *
  * @param timeline Timeline to print.
  */
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline);
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
 
 /**
  * Print debug information about timeline system.
  *
  * @param system Timeline system to print.
  */
-void mali_timeline_debug_print_system(struct mali_timeline_system *system);
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
 
 #endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
 
index 65858dc9c1ff637c618d0a4cbbd89d0fbbd98e28..be07b79317846aa04a2bdcd745b57a29f6762a56 100755 (executable)
@@ -234,12 +234,22 @@ _mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notificati
 _mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
 
 /** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
  *
  * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
 _mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
 
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
 /** @brief Get the user space settings applicable for calling process.
  *
  * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
@@ -365,49 +375,6 @@ _mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args);
 _mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args);
 #endif /* CONFIG_MALI400_UMP */
 
-/** @brief Determine virtual-to-physical mapping of a contiguous memory range
- * (optional)
- *
- * This allows the user-side to do a virtual-to-physical address translation.
- * In conjunction with _mali_uku_map_external_mem, this can be used to do
- * direct rendering.
- *
- * This function will only succeed on a virtual range that is mapped into the
- * current process, and that is contigious.
- *
- * If va is not page-aligned, then it is rounded down to the next page
- * boundary. The remainer is added to size, such that ((u32)va)+size before
- * rounding is equal to ((u32)va)+size after rounding. The rounded modified
- * va and size will be written out into args on success.
- *
- * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
- * then size will be rounded up to the next multiple of PAGE_SIZE before
- * translation occurs. The rounded up size will be written out into args on
- * success.
- *
- * On most OSs, virtual-to-physical address translation is a priveledged
- * function. Therefore, the implementer must validate the range supplied, to
- * ensure they are not providing arbitrary virtual-to-physical address
- * translations. While it is unlikely such a mechanism could be used to
- * compromise the security of a system on its own, it is possible it could be
- * combined with another small security risk to cause a much larger security
- * risk.
- *
- * @note This is an optional part of the interface, and is only used by certain
- * implementations of libEGL. If the platform layer in your libEGL
- * implementation does not require Virtual-to-Physical address translation,
- * then this function need not be implemented. A stub implementation should not
- * be required either, as it would only be removed by the compiler's dead code
- * elimination.
- *
- * @note if implemented, this function is entirely platform-dependant, and does
- * not exist in common code.
- *
- * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_va_to_mali_pa(_mali_uk_va_to_mali_pa_s *args);
-
 /** @} */ /* end group _mali_uk_memory */
 
 
@@ -536,36 +503,12 @@ _mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s
 /** @addtogroup _mali_uk_profiling U/K Timeline profiling module
  * @{ */
 
-/** @brief Start recording profiling events.
- *
- * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
-
 /** @brief Add event to profiling buffer.
  *
  * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
  */
 _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
 
-/** @brief Stop recording profiling events.
- *
- * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
-
-/** @brief Retrieve a recorded profiling event.
- *
- * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
-
-/** @brief Clear recorded profiling events.
- *
- * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
-
 /** @brief Return the total memory usage
  *
  * @param args see _mali_uk_profiling_memory_usage_get_s in "mali_utgard_uk_types.h"
index a72129e4279e63c0e9e3e3cb1d819084777ad2c7..f70c2479b6eaa6f1c8c73fb65b9121d48cdf53f5 100755 (executable)
@@ -38,56 +38,46 @@ extern "C" {
 #define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
 #define MALI_IOC_VSYNC_BASE     (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
 
-#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
-#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
-#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
-#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
-#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s *)
-#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s *)
-#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s *)
-#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s *)
-#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s *)
-#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2         _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
 
-#define MALI_IOC_MEM_MAP_EXT                _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
-#define MALI_IOC_MEM_UNMAP_EXT              _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
-#define MALI_IOC_MEM_ATTACH_DMA_BUF         _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
-#define MALI_IOC_MEM_RELEASE_DMA_BUF        _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
-#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
-#define MALI_IOC_MEM_ATTACH_UMP             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
-#define MALI_IOC_MEM_RELEASE_UMP            _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
-#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
-#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
-#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+#define MALI_IOC_MEM_MAP_EXT                _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s)
+#define MALI_IOC_MEM_UNMAP_EXT              _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF         _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF        _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_ATTACH_UMP             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s)
+#define MALI_IOC_MEM_RELEASE_UMP            _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
 
-#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
-#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s *)
-#define MALI_IOC_PP_NUMBER_OF_CORES_GET     _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
-#define MALI_IOC_PP_CORE_VERSION_GET        _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
-#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET     _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET        _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
 
-#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
-#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
-#define MALI_IOC_GP2_CORE_VERSION_GET       _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
-#define MALI_IOC_GP2_SUSPEND_RESPONSE       _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET       _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE       _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE, _mali_uk_gp_suspend_response_s)
 
-#define MALI_IOC_PROFILING_START            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
-#define MALI_IOC_PROFILING_ADD_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
-#define MALI_IOC_PROFILING_STOP             _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
-#define MALI_IOC_PROFILING_GET_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
-#define MALI_IOC_PROFILING_CLEAR            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
-#define MALI_IOC_PROFILING_GET_CONFIG       _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
-#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
 
-#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
-
-/* Deprecated ioctls */
-#define MALI_IOC_MEM_GET_BIG_BLOCK          _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_FREE_BIG_BLOCK         _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_INIT                   _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, void *)
-#define MALI_IOC_MEM_TERM                   _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, void *)
+#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
 
 #ifdef __cplusplus
 }
index 7f818276cdc1afa737c081908e4788825cd112f9..524e2c3f178be389db69276d0859cb46ea9a689b 100755 (executable)
@@ -56,7 +56,6 @@ typedef enum {
        _MALI_UK_PP_SUBSYSTEM,        /**< Fragment Processor Group of U/K calls */
        _MALI_UK_GP_SUBSYSTEM,        /**< Vertex Processor Group of U/K calls */
        _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
-       _MALI_UK_PMM_SUBSYSTEM,       /**< Power Management Module Group of U/K calls */
        _MALI_UK_VSYNC_SUBSYSTEM,     /**< VSYNC Group of U/K calls */
 } _mali_uk_subsystem_t;
 
@@ -87,8 +86,6 @@ typedef enum {
 
        _MALI_UK_INIT_MEM                = 0,    /**< _mali_ukk_init_mem() */
        _MALI_UK_TERM_MEM,                       /**< _mali_ukk_term_mem() */
-       _MALI_UK_GET_BIG_BLOCK,                  /**< _mali_ukk_get_big_block() */
-       _MALI_UK_FREE_BIG_BLOCK,                 /**< _mali_ukk_free_big_block() */
        _MALI_UK_MAP_MEM,                        /**< _mali_ukk_mem_mmap() */
        _MALI_UK_UNMAP_MEM,                      /**< _mali_ukk_mem_munmap() */
        _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
@@ -100,7 +97,6 @@ typedef enum {
        _MALI_UK_RELEASE_UMP_MEM,                /**< _mali_ukk_release_ump_mem() */
        _MALI_UK_MAP_EXT_MEM,                    /**< _mali_uku_map_external_mem() */
        _MALI_UK_UNMAP_EXT_MEM,                  /**< _mali_uku_unmap_external_mem() */
-       _MALI_UK_VA_TO_MALI_PA,                  /**< _mali_uku_va_to_mali_pa() */
        _MALI_UK_MEM_WRITE_SAFE,                 /**< _mali_uku_mem_write_safe() */
 
        /** Common functions for each core */
@@ -126,30 +122,14 @@ typedef enum {
 
        /** Profiling functions */
 
-       _MALI_UK_PROFILING_START         = 0, /**< __mali_uku_profiling_start() */
-       _MALI_UK_PROFILING_ADD_EVENT,         /**< __mali_uku_profiling_add_event() */
-       _MALI_UK_PROFILING_STOP,              /**< __mali_uku_profiling_stop() */
-       _MALI_UK_PROFILING_GET_EVENT,         /**< __mali_uku_profiling_get_event() */
-       _MALI_UK_PROFILING_CLEAR,             /**< __mali_uku_profiling_clear() */
-       _MALI_UK_PROFILING_GET_CONFIG,        /**< __mali_uku_profiling_get_config() */
+       _MALI_UK_PROFILING_ADD_EVENT     = 0, /**< __mali_uku_profiling_add_event() */
        _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
        _MALI_UK_PROFILING_MEMORY_USAGE_GET,  /**< __mali_uku_profiling_memory_usage_get() */
 
        /** VSYNC reporting fuctions */
        _MALI_UK_VSYNC_EVENT_REPORT      = 0, /**< _mali_ukk_vsync_event_report() */
-
 } _mali_uk_functions;
 
-/** @brief Get the size necessary for system info
- *
- * @see _mali_ukk_get_system_info_size()
- */
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 size;                       /**< [out] size of buffer necessary to hold system information data, in bytes */
-} _mali_uk_get_system_info_size_s;
-
-
 /** @defgroup _mali_uk_getsysteminfo U/K Get System Info
  * @{ */
 
@@ -163,95 +143,6 @@ typedef struct {
  */
 typedef u32 _mali_core_version;
 
-/**
- * Enum values for the different modes the driver can be put in.
- * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
- * Job completion is reported using the _mali_ukk_wait_for_notification call.
- * The driver blocks this io command until a job has completed or failed or a timeout occurs.
- *
- * The 'raw' mode is reserved for future expansion.
- */
-typedef enum _mali_driver_mode {
-       _MALI_DRIVER_MODE_RAW = 1,    /**< Reserved for future expansion */
-       _MALI_DRIVER_MODE_NORMAL = 2  /**< Normal mode of operation */
-} _mali_driver_mode;
-
-/** @brief List of possible cores
- *
- * add new entries to the end of this enum */
-typedef enum _mali_core_type {
-       _MALI_GP2 = 2,                /**< MaliGP2 Programmable Vertex Processor */
-       _MALI_200 = 5,                /**< Mali200 Programmable Fragment Processor */
-       _MALI_400_GP = 6,             /**< Mali400 Programmable Vertex Processor */
-       _MALI_400_PP = 7,             /**< Mali400 Programmable Fragment Processor */
-       /* insert new core here, do NOT alter the existing values */
-} _mali_core_type;
-
-
-/** @brief Capabilities of Memory Banks
- *
- * These may be used to restrict memory banks for certain uses. They may be
- * used when access is not possible (e.g. Bus does not support access to it)
- * or when access is possible but not desired (e.g. Access is slow).
- *
- * In the case of 'possible but not desired', there is no way of specifying
- * the flags as an optimization hint, so that the memory could be used as a
- * last resort.
- *
- * @see _mali_mem_info
- */
-typedef enum _mali_bus_usage {
-
-       _MALI_PP_READABLE   = (1 << 0), /** Readable by the Fragment Processor */
-       _MALI_PP_WRITEABLE  = (1 << 1), /** Writeable by the Fragment Processor */
-       _MALI_GP_READABLE   = (1 << 2), /** Readable by the Vertex Processor */
-       _MALI_GP_WRITEABLE  = (1 << 3), /** Writeable by the Vertex Processor */
-       _MALI_CPU_READABLE  = (1 << 4), /** Readable by the CPU */
-       _MALI_CPU_WRITEABLE = (1 << 5), /** Writeable by the CPU */
-       _MALI_GP_L2_ALLOC   = (1 << 6), /** GP allocate mali L2 cache lines*/
-       _MALI_MMU_READABLE  = _MALI_PP_READABLE | _MALI_GP_READABLE,   /** Readable by the MMU (including all cores behind it) */
-       _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
-} _mali_bus_usage;
-
-typedef enum mali_memory_cache_settings {
-       MALI_CACHE_STANDARD                     = 0,
-       MALI_CACHE_GP_READ_ALLOCATE     = 1,
-} mali_memory_cache_settings ;
-
-
-/** @brief Information about the Mali Memory system
- *
- * Information is stored in a linked list, which is stored entirely in the
- * buffer pointed to by the system_info member of the
- * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
- *
- * Each element of the linked list describes a single Mali Memory bank.
- * Each allocation can only come from one bank, and will not cross multiple
- * banks.
- *
- * On Mali-MMU systems, there is only one bank, which describes the maximum
- * possible address range that could be allocated (which may be much less than
- * the available physical memory)
- *
- * The flags member describes the capabilities of the memory. It is an error
- * to attempt to build a job for a particular core (PP or GP) when the memory
- * regions used do not have the capabilities for supporting that core. This
- * would result in a job abort from the Device Driver.
- *
- * For example, it is correct to build a PP job where read-only data structures
- * are taken from a memory with _MALI_PP_READABLE set and
- * _MALI_PP_WRITEABLE clear, and a framebuffer with  _MALI_PP_WRITEABLE set and
- * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
- * where _MALI_PP_WRITEABLE is clear.
- */
-typedef struct _mali_mem_info {
-       u32 size;                     /**< Size of the memory bank in bytes */
-       _mali_bus_usage flags;        /**< Capabilitiy flags of the memory */
-       u32 maximum_order_supported;  /**< log2 supported size */
-       u32 identifier;               /* mali_memory_cache_settings cache_settings; */
-       struct _mali_mem_info *next;  /**< Next List Link */
-} _mali_mem_info;
-
 /** @} */ /* end group _mali_uk_core */
 
 
@@ -284,7 +175,7 @@ typedef enum _maligp_job_suspended_response_code {
 } _maligp_job_suspended_response_code;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 cookie;                     /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
        _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
        u32 arguments[2];               /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
@@ -295,14 +186,7 @@ typedef struct {
 /** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
  * @{ */
 
-/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
-typedef enum {
-       _MALI_UK_START_JOB_STARTED,                         /**< Job started */
-       _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE           /**< Job could not be started at this time. Try starting the job again */
-} _mali_uk_start_job_status;
-
 /** @brief Status indicating the result of the execution of a Vertex or Fragment processor job  */
-
 typedef enum {
        _MALI_UK_JOB_STATUS_END_SUCCESS         = 1 << (16 + 0),
        _MALI_UK_JOB_STATUS_END_OOM             = 1 << (16 + 1),
@@ -372,8 +256,8 @@ typedef enum {
  *
  */
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       u32 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
        u32 priority;                       /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
        u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
@@ -382,7 +266,7 @@ typedef struct {
        u32 frame_builder_id;               /**< [in] id of the originating frame builder */
        u32 flush_id;                       /**< [in] flush id within the originating frame builder */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
-       u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on gp timeline for this job will be written */
+       u64 timeline_point_ptr;            /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
 } _mali_uk_gp_start_job_s;
 
 #define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
@@ -392,7 +276,7 @@ typedef struct {
 /** @} */ /* end group _mali_uk_gpstartjob_s */
 
 typedef struct {
-       u32 user_job_ptr;               /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;               /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;     /**< [out] status of finished job */
        u32 heap_current_addr;          /**< [out] value of the GP PLB PL heap start address register */
        u32 perf_counter0;              /**< [out] value of performance counter 0 (see ARM DDI0415A) */
@@ -400,7 +284,7 @@ typedef struct {
 } _mali_uk_gp_job_finished_s;
 
 typedef struct {
-       u32 user_job_ptr;                    /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;                    /**< [out] identifier for the job in user space */
        u32 cookie;                          /**< [out] identifier for the core in kernel space on which the job stalled */
 } _mali_uk_gp_job_suspended_s;
 
@@ -467,8 +351,8 @@ typedef struct {
  *
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 user_job_ptr;               /**< [in] identifier for the job in user space */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;               /**< [in] identifier for the job in user space */
        u32 priority;                   /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS];         /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
        u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
@@ -488,21 +372,21 @@ typedef struct {
        u32 tilesy;                         /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
        u32 heatmap_mem;                    /**< [in] memory address to store counter values per tile (aka heatmap) */
        u32 num_memory_cookies;             /**< [in] number of memory cookies attached to job */
-       u32 *memory_cookies;                /**< [in] memory cookies attached to job  */
+       u64 memory_cookies;               /**< [in] pointer to array of u32 memory cookies attached to job */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
-       u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on pp timeline for this job will be written */
+       u64 timeline_point_ptr;           /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
 } _mali_uk_pp_start_job_s;
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       _mali_uk_gp_start_job_s *gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
-       _mali_uk_pp_start_job_s *pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+       u64 ctx;       /**< [in,out] user-kernel context (trashed on output) */
+       u64 gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+       u64 pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
 } _mali_uk_pp_and_gp_start_job_s;
 
 /** @} */ /* end group _mali_uk_ppstartjob_s */
 
 typedef struct {
-       u32 user_job_ptr;                          /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;                          /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;                /**< [out] status of finished job */
        u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of perfomance counter 0 (see ARM DDI0415A), one for each sub job */
        u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of perfomance counter 1 (see ARM DDI0415A), one for each sub job */
@@ -526,7 +410,7 @@ typedef enum {
 } _mali_uk_pp_job_wbx_flag;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 fb_id;                      /**< [in] Frame builder ID of job to disable WB units for */
        u32 wb0_memory;
        u32 wb1_memory;
@@ -540,20 +424,20 @@ typedef struct {
  * @{ */
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       u32 type;                           /**< [in] type of soft job */
-       u32 user_job;                       /**< [in] identifier for the job in user space */
-       u32 *job_id_ptr;                    /**< [in,out] pointer to location where job id will be written */
+       u64 ctx;                            /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job;                       /**< [in] identifier for the job in user space */
+       u64 job_id_ptr;                     /**< [in,out] pointer to location of u32 where job id will be written */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
        u32 point;                          /**< [out] point on soft timeline for this job */
+       u32 type;                           /**< [in] type of soft job */
 } _mali_uk_soft_job_start_s;
 
 typedef struct {
-       u32 user_job;                       /**< [out] identifier for the job in user space */
+       u64 user_job;                       /**< [out] identifier for the job in user space */
 } _mali_uk_soft_job_activated_s;
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        u32 job_id;                         /**< [in] id for soft job */
 } _mali_uk_soft_job_signal_s;
 
@@ -689,7 +573,7 @@ typedef struct {
  * when the polygon list builder unit has run out of memory.
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [out] Type of notification available */
        union {
                _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
@@ -706,7 +590,7 @@ typedef struct {
  * This is used to send a quit message to the callback thread.
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [in] Type of notification to post */
 } _mali_uk_post_notification_s;
 
@@ -740,7 +624,7 @@ typedef struct {
  * The 16bit integer is stored twice in a 32bit integer
  * For example, for version 1 the value would be 0x00010001
  */
-#define _MALI_API_VERSION 401
+#define _MALI_API_VERSION 600
 #define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
 
 /**
@@ -765,10 +649,31 @@ typedef u32 _mali_uk_api_version;
  * of the interface may be backwards compatible.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
        int compatible;                 /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
 } _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+       u64 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
+       int compatible;                 /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
+
 /** @} */ /* end group _mali_uk_getapiversion_s */
 
 /** @defgroup _mali_uk_get_user_settings_s Get user space settings */
@@ -782,21 +687,21 @@ typedef struct {
  *
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
 } _mali_uk_get_user_settings_s;
 
 /** @brief struct to hold the value of a particular setting from the user space within a given context
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_user_setting_t setting; /**< [in] setting to get */
        u32 value;                       /**< [out] value of setting */
 } _mali_uk_get_user_setting_s;
 
 /** @brief Arguments for _mali_ukk_request_high_priority() */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
 } _mali_uk_request_high_priority_s;
 
 /** @} */ /* end group _mali_uk_core */
@@ -809,7 +714,7 @@ typedef struct {
 #define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 phys_addr;                  /**< [in] physical address */
        u32 size;                       /**< [in] size */
        u32 mali_address;               /**< [in] mali address to map the physical memory to */
@@ -819,13 +724,13 @@ typedef struct {
 } _mali_uk_map_external_mem_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
 } _mali_uk_unmap_external_mem_s;
 
 /** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 mem_fd;                     /**< [in] Memory descriptor */
        u32 size;                       /**< [in] size */
        u32 mali_address;               /**< [in] mali address to map the physical memory to */
@@ -835,19 +740,19 @@ typedef struct {
 } _mali_uk_attach_dma_buf_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 mem_fd;                     /**< [in] Memory descriptor */
        u32 size;                       /**< [out] size */
 } _mali_uk_dma_buf_get_size_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
 } _mali_uk_release_dma_buf_s;
 
 /** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 secure_id;                  /**< [in] secure id */
        u32 size;                       /**< [in] size */
        u32 mali_address;               /**< [in] mali address to map the physical memory to */
@@ -857,61 +762,33 @@ typedef struct {
 } _mali_uk_attach_ump_mem_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
 } _mali_uk_release_ump_mem_s;
 
-/** @brief Arguments for _mali_ukk_va_to_mali_pa()
- *
- * if size is zero or not a multiple of the system's page size, it will be
- * rounded up to the next multiple of the page size. This will occur before
- * any other use of the size parameter.
- *
- * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
- * boundary.
- *
- * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
- * contiguity.
- *
- * The implementor will check that the entire physical range is allowed to be mapped
- * into user-space.
- *
- * Failure will occur if either of the above are not satisfied.
- *
- * Otherwise, the physical base address of the range is returned through pa,
- * va is updated to be page aligned, and size is updated to be a non-zero
- * multiple of the system's pagesize.
- */
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       void *va;                       /**< [in,out] Virtual address of the start of the range */
-       u32 pa;                         /**< [out] Physical base address of the range */
-       u32 size;                       /**< [in,out] Size of the range, in bytes. */
-} _mali_uk_va_to_mali_pa_s;
-
 /**
  * @brief Arguments for _mali_uk[uk]_mem_write_safe()
  */
 typedef struct {
-       void *ctx;        /**< [in,out] user-kernel context (trashed on output) */
-       const void *src;  /**< [in]     Pointer to source data */
-       void *dest;       /**< [in]     Destination Mali buffer */
-       u32 size;         /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+       u64 ctx;  /**< [in,out] user-kernel context (trashed on output) */
+       u64 src;  /**< [in] Pointer to source data */
+       u64 dest; /**< [in] Destination Mali buffer */
+       u32 size;   /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
 } _mali_uk_mem_write_safe_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 size;                       /**< [out] size of MMU page table information (registers + page tables) */
 } _mali_uk_query_mmu_page_table_dump_size_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 size;                       /**< [in] size of buffer to receive mmu page table information */
-       void *buffer;                   /**< [in,out] buffer to receive mmu page table information */
+       u64 buffer;                   /**< [in,out] buffer to receive mmu page table information */
        u32 register_writes_size;       /**< [out] size of MMU register dump */
-       u32 *register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
+       u64 register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
        u32 page_table_dump_size;       /**< [out] size of MMU page table dump */
-       u32 *page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
+       u64 page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
 } _mali_uk_dump_mmu_page_table_s;
 
 /** @} */ /* end group _mali_uk_memory */
@@ -927,7 +804,7 @@ typedef struct {
  * will contain the number of Fragment Processor cores in the system.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 number_of_total_cores;      /**< [out] Total number of Fragment Processor cores in the system */
        u32 number_of_enabled_cores;    /**< [out] Number of enabled Fragment Processor cores */
 } _mali_uk_get_pp_number_of_cores_s;
@@ -939,8 +816,9 @@ typedef struct {
  * the version that all Fragment Processor cores are compatible with.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version  */
+       u32 padding;                    /**< Pad struct size to a multiple of 8 bytes */
 } _mali_uk_get_pp_core_version_s;
 
 /** @} */ /* end group _mali_uk_pp */
@@ -956,7 +834,7 @@ typedef struct {
  * will contain the number of Vertex Processor cores in the system.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 number_of_cores;            /**< [out] number of Vertex Processor cores in the system */
 } _mali_uk_get_gp_number_of_cores_s;
 
@@ -967,45 +845,23 @@ typedef struct {
  * the version that all Vertex Processor cores are compatible with.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version */
 } _mali_uk_get_gp_core_version_s;
 
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 limit;                      /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
-} _mali_uk_profiling_start_s;
+/** @} */ /* end group _mali_uk_gp */
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 event_id;                   /**< [in] event id to register (see  enum mali_profiling_events for values) */
        u32 data[5];                    /**< [in] event specific data */
 } _mali_uk_profiling_add_event_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 count;                      /**< [out] The number of events sampled */
-} _mali_uk_profiling_stop_s;
-
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 index;                      /**< [in] which index to get (starting at zero) */
-       u64 timestamp;                  /**< [out] timestamp of event */
-       u32 event_id;                   /**< [out] event id of event (see  enum mali_profiling_events for values) */
-       u32 data[5];                    /**< [out] event specific data */
-} _mali_uk_profiling_get_event_s;
-
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-} _mali_uk_profiling_clear_s;
-
-typedef struct {
-       void *ctx;                     /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                     /**< [in,out] user-kernel context (trashed on output) */
        u32 memory_usage;              /**< [out] total memory usage */
 } _mali_uk_profiling_memory_usage_get_s;
 
-/** @} */ /* end group _mali_uk_gp */
-
 
 /** @addtogroup _mali_uk_memory U/K Memory
  * @{ */
@@ -1037,14 +893,11 @@ typedef struct {
  * implementation of the U/K interface. Its value must be zero.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
        u32 size;                       /**< [in] Size of the requested mapping */
        u32 phys_addr;                  /**< [in] Physical address - could be offset, depending on caller+callee convention */
        u32 cookie;                     /**< [out] Returns a cookie for use in munmap calls */
-       void *uku_private;              /**< [in] User-side Private word used by U/K interface */
-       void *ukk_private;              /**< [in] Kernel-side Private word used by U/K interface */
-       mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficency */
 } _mali_uk_mem_mmap_s;
 
 /** @brief Arguments to _mali_ukk_mem_munmap()
@@ -1058,7 +911,7 @@ typedef struct {
  * originally obtained range, or to unmap more than was originally obtained.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        void *mapping;                  /**< [in] The mapping returned from mmap call */
        u32 size;                       /**< [in] The size passed to mmap call */
        u32 cookie;                     /**< [in] Cookie from mmap call */
@@ -1082,7 +935,7 @@ typedef enum _mali_uk_vsync_event {
  *
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_vsync_event event;     /**< [in] VSYNCH event type */
 } _mali_uk_vsync_event_report_s;
 
@@ -1096,9 +949,9 @@ typedef struct {
  * Values recorded for each of the software counters during a single renderpass.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 *counters;                  /**< [in] The array of counter values */
-       u32  num_counters;              /**< [in] The number of elements in counters array */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 counters;                  /**< [in] The array of u32 counter values */
+       u32 num_counters;              /**< [in] The number of elements in counters array */
 } _mali_uk_sw_counters_report_s;
 
 /** @} */ /* end group _mali_uk_sw_counters_report */
@@ -1107,20 +960,20 @@ typedef struct {
  * @{ */
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 timeline;                   /**< [in] timeline id */
        u32 point;                      /**< [out] latest point on timeline */
 } _mali_uk_timeline_get_latest_point_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_fence_t fence;         /**< [in] fence */
        u32 timeout;                    /**< [in] timeout (0 for no wait, -1 for blocking) */
        u32 status;                     /**< [out] status of fence (1 if signaled, 0 if timeout) */
 } _mali_uk_timeline_wait_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_fence_t fence;         /**< [in] mali fence to create linux sync fence from */
        s32 sync_fd;                    /**< [out] file descriptor for new linux sync fence */
 } _mali_uk_timeline_create_sync_fence_s;
index a18aa6e1fd128bfc4ebd3801c9fe22865c305572..2a2b1b873f22ef0ec7edb8aec90e8e6660af8c8d 100755 (executable)
@@ -22,6 +22,7 @@
 #include "mali_kernel_license.h"
 #include <linux/platform_device.h>
 #include <linux/miscdevice.h>
+#include <linux/bug.h>
 #include <linux/mali/mali_utgard.h>
 #include "mali_kernel_common.h"
 #include "mali_session.h"
@@ -222,6 +223,7 @@ struct file_operations mali_fops = {
 #else
        .ioctl = mali_ioctl,
 #endif
+       .compat_ioctl = mali_ioctl,
        .mmap = mali_mmap
 };
 
@@ -627,69 +629,56 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 
        switch (cmd) {
        case MALI_IOC_WAIT_FOR_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
                err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
                break;
 
+       case MALI_IOC_GET_API_VERSION_V2:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+               err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+               break;
+
        case MALI_IOC_GET_API_VERSION:
                err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
                break;
 
        case MALI_IOC_POST_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
                err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
                break;
 
        case MALI_IOC_GET_USER_SETTINGS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
                err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
                break;
 
        case MALI_IOC_REQUEST_HIGH_PRIORITY:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
                err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
                break;
 
 #if defined(CONFIG_MALI400_PROFILING)
-       case MALI_IOC_PROFILING_START:
-               err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
-               break;
-
        case MALI_IOC_PROFILING_ADD_EVENT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
                err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
                break;
 
-       case MALI_IOC_PROFILING_STOP:
-               err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
-               break;
-
-       case MALI_IOC_PROFILING_GET_EVENT:
-               err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
-               break;
-
-       case MALI_IOC_PROFILING_CLEAR:
-               err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
-               break;
-
-       case MALI_IOC_PROFILING_GET_CONFIG:
-               /* Deprecated: still compatible with get_user_settings */
-               err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
-               break;
-
        case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
                err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
                break;
 
 
        case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
                err = profiling_memory_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
                break;
 
 #else
 
-       case MALI_IOC_PROFILING_START:              /* FALL-THROUGH */
        case MALI_IOC_PROFILING_ADD_EVENT:          /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_STOP:               /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_GET_EVENT:          /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_CLEAR:              /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_GET_CONFIG:         /* FALL-THROUGH */
        case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+       case MALI_IOC_PROFILING_MEMORY_USAGE_GET:   /* FALL-THROUGH */
                MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
                err = -ENOTTY;
                break;
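Routing 32-bit callers through .compat_ioctl to the same handler, as the hunk above does, is only safe when each argument struct has the same size and layout under both ABIs; the BUILD_BUG_ON(!IS_ALIGNED(sizeof(...), sizeof(u64))) lines turn the 8-byte-multiple part of that requirement into a compile-time failure. A sketch of the guard on a hypothetical struct:

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical ioctl argument block. Without the reserved field the struct
 * would end in implicit tail padding, and on ABIs that only 4-align u64 its
 * size could even differ between 32-bit and 64-bit builds. */
struct example_ioctl_args {
	u64 ctx;
	u32 value;
	u32 reserved;
};

static void example_abi_check(void)
{
	/* Fails the build unless the struct size is a multiple of 8 bytes. */
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct example_ioctl_args), sizeof(u64)));
}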
@@ -697,32 +686,39 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 #endif
 
        case MALI_IOC_MEM_WRITE_SAFE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
                err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_MAP_EXT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_map_external_mem_s), sizeof(u64)));
                err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_UNMAP_EXT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unmap_external_mem_s), sizeof(u64)));
                err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
                err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
                err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
                break;
 
 #if defined(CONFIG_MALI400_UMP)
 
        case MALI_IOC_MEM_ATTACH_UMP:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_attach_ump_mem_s), sizeof(u64)));
                err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_RELEASE_UMP:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_release_ump_mem_s), sizeof(u64)));
                err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
                break;
 
@@ -737,14 +733,17 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
        case MALI_IOC_MEM_ATTACH_DMA_BUF:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_attach_dma_buf_s), sizeof(u64)));
                err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_RELEASE_DMA_BUF:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_release_dma_buf_s), sizeof(u64)));
                err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
                err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
                break;
 #else
@@ -758,73 +757,76 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 #endif
 
        case MALI_IOC_PP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
                err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_PP_AND_GP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
                err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
                err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
                break;
 
        case MALI_IOC_PP_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
                err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
                break;
 
        case MALI_IOC_PP_DISABLE_WB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
                err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
                err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
                err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
                err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_SUSPEND_RESPONSE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
                err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
                break;
 
        case MALI_IOC_VSYNC_EVENT_REPORT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
                err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
                break;
 
        case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
                err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
                break;
        case MALI_IOC_TIMELINE_WAIT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
                err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
                break;
        case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
                err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
                break;
        case MALI_IOC_SOFT_JOB_START:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
                err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
                break;
        case MALI_IOC_SOFT_JOB_SIGNAL:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
                err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_INIT: /* Fallthrough */
-       case MALI_IOC_MEM_TERM: /* Fallthrough */
-               MALI_DEBUG_PRINT(2, ("Deprecated ioctls called\n"));
-               err = -ENOTTY;
-               break;
-
-       case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
-       case MALI_IOC_MEM_FREE_BIG_BLOCK:
-               MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
-               err = -ENOTTY;
-               break;
-
        default:
                MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
                err = -ENOTTY;
index 10d970fad431fc9e3c58a18d8bf1f53d92f9ccf6..bc5c29a5d5c41fda7b8ed622c0ce8696f71dd821 100755 (executable)
@@ -49,6 +49,7 @@
 #include "mali_gp_job.h"
 #include "mali_pp_job.h"
 #include "mali_pp_scheduler.h"
+#include "mali_session.h"
 
 #define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
 #define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
@@ -168,10 +169,10 @@ static const struct file_operations hw_core_base_addr_fops = {
 
 static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
-       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
-       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
-       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
        char buf[64];
        int r;
        u32 val;
@@ -213,10 +214,10 @@ static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf,
 
 static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
-       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
-       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
-       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
        char buf[64];
        long val;
        int ret;
@@ -950,9 +951,11 @@ static int mali_sysfs_user_settings_register(void)
        struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
 
        if (mali_user_settings_dir != NULL) {
-               int i;
+               long i;
                for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
-                       debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void *)i, &user_settings_fops);
+                       debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+                                           0600, mali_user_settings_dir, (void *)i,
+                                           &user_settings_fops);
                }
        }
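The loop counters above change from int to long because the counter index is stored directly in the debugfs private_data pointer slot rather than in allocated memory; on a 64-bit kernel, casting an int to and from a pointer draws int-to-pointer-cast warnings, while a pointer-sized integer round-trips cleanly. A sketch of the idiom (helper names are illustrative, not from the driver):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/types.h>

/* Hypothetical helpers: stash a small index in the debugfs private_data
 * pointer and recover it inside the file operation, with no allocation. */
static struct dentry *example_counter_file(struct dentry *parent, long index,
					   const struct file_operations *fops)
{
	return debugfs_create_file("counter_src", 0600, parent,
				   (void *)index, fops);
}

static long example_counter_index(struct file *filp)
{
	return (long)(uintptr_t)filp->private_data;
}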
 
@@ -1176,6 +1179,39 @@ static const struct file_operations version_fops = {
        .read = version_read,
 };
 
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+       struct mali_session_data *session, *tmp;
+       u32 session_seq = 1;
+
+       seq_printf(s, "timeline system info: \n=================\n\n");
+
+       mali_session_lock();
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+               mali_timeline_debug_print_system(session->timeline_system, s);
+               seq_printf(s, "session %d end\n\n\n", session_seq++);
+       }
+       mali_session_unlock();
+
+       return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+       .owner = THIS_MODULE,
+       .open = timeline_debugfs_open,
+       .read  = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release
+};
+#endif
+
 int mali_sysfs_register(const char *mali_dev_name)
 {
        mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
@@ -1209,7 +1245,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
                        if (mali_gp_dir != NULL) {
                                u32 num_groups;
-                               int i;
+                               long i;
 
                                num_groups = mali_group_get_glob_num_groups();
                                for (i = 0; i < num_groups; i++) {
@@ -1232,7 +1268,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
                        if (mali_pp_dir != NULL) {
                                u32 num_groups;
-                               int i;
+                               long i;
 
                                debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
                                debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
@@ -1298,7 +1334,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
                        if (mali_profiling_dir != NULL) {
                                u32 max_sub_jobs;
-                               int i;
+                               long i;
                                struct dentry *mali_profiling_gp_dir;
                                struct dentry *mali_profiling_pp_dir;
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
@@ -1331,8 +1367,14 @@ int mali_sysfs_register(const char *mali_dev_name)
                                        _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
                                        mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
                                        if (NULL != mali_profiling_pp_x_dir) {
-                                               debugfs_create_file("counter_src0", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), &profiling_counter_src_fops);
-                                               debugfs_create_file("counter_src1", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src0",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+                                                                   &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src1",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+                                                                   &profiling_counter_src_fops);
                                        }
                                }
 
@@ -1354,6 +1396,9 @@ int mali_sysfs_register(const char *mali_dev_name)
                        debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
 #endif
 
+#if defined(DEBUG)
+                       debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
                        if (mali_sysfs_user_settings_register()) {
                                /* Failed to create the debugfs entries for the user settings DB. */
                                MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
old mode 100644 (file)
new mode 100755 (executable)
index fb0937c..dcdc953
@@ -32,7 +32,8 @@ void mali_memory_terminate(void);
  * @param table_page GPU pointer to the allocated page
  * @param mapping CPU pointer to the mapping of the allocated page
  */
-MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
 {
        return mali_mem_os_get_table_page(table_page, mapping);
 }
@@ -43,7 +44,8 @@ MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page,
  *
  * @param pa the GPU address of the page to release
  */
-MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
 {
        mali_mem_os_release_table_page(phys, virt);
 }
index 0bb2e50e9e44b55c48a6ba835a7b2cc6fa0d2289..248e29980255eeeb4a643437413719efcdf76269 100755 (executable)
@@ -378,11 +378,11 @@ int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma
                return -EFAULT;
        }
 
-       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
+       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %ld\n", args.cookie));
 
        _mali_osk_mutex_wait(session->memory_lock);
 
-       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
+       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, (u32)args.cookie);
 
        if (NULL != descriptor) {
                MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));
@@ -393,7 +393,7 @@ int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma
 
                mali_mem_descriptor_destroy(descriptor);
        } else {
-               MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+               MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %ld used to release dma-buf\n", args.cookie));
                ret = -EINVAL;
        }
 
index dfd14a34a47cf767c56ba2f801f3c3646976423f..3b67d6c2344ea6cbf511995fb5537f5f529cb9c6 100755 (executable)
@@ -31,9 +31,8 @@ _mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args
        _mali_osk_errcode_t err;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
 
-       session = (struct mali_session_data *)args->ctx;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
        MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
 
        /* check arguments */
@@ -45,10 +44,8 @@ _mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args
 
        MALI_DEBUG_PRINT(3,
                         ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
-                         (void *)args->phys_addr,
-                         (void *)(args->phys_addr + args->size - 1),
-                         (void *)args->mali_address)
-                       );
+                         args->phys_addr, (args->phys_addr + args->size - 1),
+                         args->mali_address));
 
        /* Validate the mali physical range */
        if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
@@ -106,9 +103,8 @@ _mali_osk_errcode_t _mali_ukk_unmap_external_mem(_mali_uk_unmap_external_mem_s *
        struct mali_session_data *session;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
 
-       session = (struct mali_session_data *)args->ctx;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
        MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
 
        if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
index 48ba42b49b513b553ef575cf295bd3e6d3039412..036e82de0ae2c63b8c2b15994eaf6582b26812f6 100755 (executable)
 #define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
 #define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
@@ -140,8 +145,16 @@ static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
        /* Allocate new pages, if needed. */
        for (i = 0; i < remaining; i++) {
                dma_addr_t dma_addr;
+               gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
+               int err;
 
-               new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+               flags |= GFP_HIGHUSER;
+#else
+               flags |= GFP_DMA32;
+#endif
+
+               new_page = alloc_page(flags);
 
                if (unlikely(NULL == new_page)) {
                        /* Calculate the number of pages actually allocated, and free them. */
@@ -155,6 +168,17 @@ static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
                dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
                                        0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
 
+               err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+               if (unlikely(err)) {
+                       MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+                                               new_page, err));
+                       __free_page(new_page);
+                       descriptor->os_mem.count = (page_count - remaining) + i;
+                       atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(descriptor);
+                       return -EFAULT;
+               }
+
                /* Store page phys addr */
                SetPagePrivate(new_page);
                set_page_private(new_page, dma_addr);
@@ -188,8 +212,16 @@ static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_ses
        }
 
        list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
-               u32 phys = page_private(page);
-               mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+               dma_addr_t phys = page_private(page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+               /* Verify that the "physical" address is 32-bit and
+                * usable for Mali, when on a system with bus addresses
+                * wider than 32-bit. */
+               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+
+               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
                virt += MALI_MMU_PAGE_SIZE;
        }
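The Mali MMU page-table entries only hold 32-bit addresses, so the hunk above stores each page's full dma_addr_t and, in DEBUG builds on systems with wider bus addresses, asserts that it fits in 32 bits. The conventional way to guarantee that property rather than assert it is to declare the device's addressing capability at probe time; this patch does not do so, but a hypothetical sketch (not part of the driver) would look like:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe-time constraint: declare that the device can only
 * address 32 bits, so the streaming and coherent DMA APIs never return
 * addresses above 4 GiB. */
static int example_constrain_dma(struct device *dev)
{
	int err;

	err = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}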
 
@@ -294,19 +326,20 @@ void mali_mem_os_release(mali_mem_allocation *descriptor)
 #define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
 static struct {
        struct {
-               u32 phys;
+               mali_dma_addr phys;
                mali_io_address mapping;
        } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
-       u32 count;
+       size_t count;
        spinlock_t lock;
 } mali_mem_page_table_page_pool = {
        .count = 0,
        .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
 };
 
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
 {
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+       dma_addr_t tmp_phys;
 
        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (0 < mali_mem_page_table_page_pool.count) {
@@ -319,16 +352,32 @@ _mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mappi
        spin_unlock(&mali_mem_page_table_page_pool.lock);
 
        if (_MALI_OSK_ERR_OK != ret) {
-               *mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+                                          _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+                                          GFP_KERNEL, &dma_attrs_wc);
+#else
+               *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+                                                 _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
                if (NULL != *mapping) {
                        ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+                       /* Verify that the "physical" address is 32-bit and
+                        * usable for Mali, when on a system with bus addresses
+                        * wider than 32-bit. */
+                       MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+                       *phys = (mali_dma_addr)tmp_phys;
                }
        }
 
        return ret;
 }
 
-void mali_mem_os_release_table_page(u32 phys, void *virt)
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
 {
        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
@@ -342,7 +391,14 @@ void mali_mem_os_release_table_page(u32 phys, void *virt)
        } else {
                spin_unlock(&mali_mem_page_table_page_pool.lock);
 
-               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev,
+                              _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+                              &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
        }
 }
 
@@ -367,7 +423,7 @@ static void mali_mem_os_free_page(struct page *page)
  */
 static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
 {
-       u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        u32 i;
 
@@ -387,7 +443,14 @@ static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
 
        /* After releasing the spinlock: free the pages we removed from the pool. */
        for (i = 0; i < nr_to_free; i++) {
-               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+                              virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE,
+                                     virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
        }
 }
 
@@ -530,6 +593,10 @@ _mali_osk_errcode_t mali_mem_os_init(void)
                return _MALI_OSK_ERR_NOMEM;
        }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
        register_shrinker(&mali_mem_os_allocator.shrinker);
 
        return _MALI_OSK_ERR_OK;
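On kernels 3.8 and newer, the page-table pool above switches from dma_alloc_writecombine()/dma_free_writecombine() to dma_alloc_attrs()/dma_free_attrs() with a DMA_ATTR_WRITE_COMBINE attribute set once at init. A condensed sketch of the idiom, valid for 3.8 <= kernel < 4.8 where dma_alloc_attrs() still takes a struct dma_attrs pointer (helper names are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static DEFINE_DMA_ATTRS(example_wc_attrs);

static void *example_alloc_wc_page(struct device *dev, dma_addr_t *dma)
{
	/* Request a write-combined CPU mapping instead of the default
	 * uncached one; the device sees the same page either way. */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &example_wc_attrs);
	return dma_alloc_attrs(dev, PAGE_SIZE, dma, GFP_KERNEL,
			       &example_wc_attrs);
}

static void example_free_wc_page(struct device *dev, void *cpu, dma_addr_t dma)
{
	dma_free_attrs(dev, PAGE_SIZE, cpu, dma, &example_wc_attrs);
}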
index 753d5ccdcdc915ddfce55c285563d4a3c659da2d..8df7e99c33c29d7a094e73d92e84a1339a80a7e9 100755 (executable)
@@ -36,9 +36,9 @@ mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_s
  */
 void mali_mem_os_release(mali_mem_allocation *descriptor);
 
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping);
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
 
-void mali_mem_os_release_table_page(u32 phys, void *virt);
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
 
 _mali_osk_errcode_t mali_mem_os_init(void);
 void mali_mem_os_term(void);
index 8e59eacf68dbcd990ca13c5845b8dfb78d5d5be8..92e654a3de5b92febd51a616a041a688c62b71cd 100755 (executable)
@@ -114,10 +114,9 @@ _mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
        int md, ret;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       session = (struct mali_session_data *)args->ctx;
-       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
 
        /* check arguments */
        /* NULL might be a valid Mali address */
@@ -191,10 +190,9 @@ _mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args)
        struct mali_session_data *session;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       session = (struct mali_session_data *)args->ctx;
-       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
 
        if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
                MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
index e83b4ccd7d02e88df7b65fbfa4136c0b568d7c95..280f98559db4caa11eb2df279fd4e54232834e52 100755 (executable)
@@ -82,7 +82,7 @@ void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 o
        _mali_osk_write_mem_barrier();
 }
 
-u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
 {
 #define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
        u32 retval = 0;
@@ -125,13 +125,22 @@ u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
 
 _mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
 {
+       void __user *src;
+       void __user *dst;
+       struct mali_session_data *session;
+
        MALI_DEBUG_ASSERT_POINTER(args);
 
-       if (NULL == args->ctx) {
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (NULL == session) {
                return _MALI_OSK_ERR_INVALID_ARGS;
        }
 
+       src = (void __user *)(uintptr_t)args->src;
+       dst = (void __user *)(uintptr_t)args->dest;
+
        /* Return number of bytes actually copied */
-       args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+       args->size = _mali_osk_mem_write_safe(dst, src, args->size);
        return _MALI_OSK_ERR_OK;
 }
index 6e104ad6c86e6cf85f793754eacbaefe394d10db..54bdc4369fba4c306d3a2e000db631927254bcff 100755 (executable)
@@ -16,6 +16,7 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/module.h>
 #include "mali_osk.h"
 
@@ -41,6 +42,17 @@ u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
        return res;
 }
 
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...)
+{
+       va_list args;
+       char buf[512];
+
+       va_start(args, fmt);
+       vscnprintf(buf, sizeof(buf), fmt, args);
+       seq_printf(print_ctx, "%s", buf);
+       va_end(args);
+}
+
 void _mali_osk_abort(void)
 {
        /* make a simple fault by dereferencing a NULL pointer */
index 8b9e274ea48e5ef971c1c8da25b33de7defd889c..c125bc37e58149d4b827db11f0c27264d3b06472 100755 (executable)
@@ -37,45 +37,6 @@ void _mali_osk_profiling_term(void)
        /* Nothing to do */
 }
 
-_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit)
-{
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
-{
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
-}
-
-u32 _mali_osk_profiling_get_count(void)
-{
-       return 0;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
-{
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_clear(void)
-{
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
-}
-
-mali_bool _mali_osk_profiling_is_recording(void)
-{
-       return MALI_FALSE;
-}
-
-mali_bool _mali_osk_profiling_have_recording(void)
-{
-       return MALI_FALSE;
-}
-
 void _mali_osk_profiling_report_sw_counters(u32 *counters)
 {
        trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
@@ -86,12 +47,6 @@ void _mali_osk_profiling_memory_usage_get(u32 *memory_usage)
        *memory_usage = _mali_ukk_report_memory_usage();
 }
 
-
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
-{
-       return _mali_osk_profiling_start(&args->limit);
-}
-
 _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
 {
        /* Always add process and thread identificator in the first two data elements for events from user space */
@@ -100,24 +55,12 @@ _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s
        return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
-{
-       return _mali_osk_profiling_stop(&args->count);
-}
-
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
 {
-       return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
-}
+       u32 *counters = (u32 *)(uintptr_t)args->counters;
 
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
-{
-       return _mali_osk_profiling_clear();
-}
+       _mali_osk_profiling_report_sw_counters(counters);
 
-_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
-{
-       _mali_osk_profiling_report_sw_counters(args->counters);
        return _MALI_OSK_ERR_OK;
 }
 
index ac054be61a70664b037f7ca515c95a6d0a4da0b3..e8d68dd8fae460afd0c22c6d7ce9fe6287a64330 100755 (executable)
 
 typedef struct dma_pool *mali_dma_pool;
 
+typedef u32 mali_dma_addr;
+
 
 MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
 {
-       return dma_pool_create("mali-dma", &mali_platform_device->dev, size, alignment, boundary);
+       return dma_pool_create("mali-dma", &mali_platform_device->dev,
+                              (size_t)size, (size_t)alignment, (size_t)boundary);
 }
 
 MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
@@ -42,14 +45,26 @@ MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
        dma_pool_destroy(pool);
 }
 
-MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, u32 *phys_addr)
+MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, mali_dma_addr *phys_addr)
 {
-       return dma_pool_alloc(pool, GFP_KERNEL, phys_addr);
+       void *ret;
+       dma_addr_t phys;
+
+       ret = dma_pool_alloc(pool, GFP_KERNEL, &phys);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+       /* Verify that the "physical" address is 32-bit and
+        * usable for Mali, when on a system with bus addresses
+        * wider than 32-bit. */
+       BUG_ON(0 != (phys >> 32));
+#endif
+       *phys_addr = phys;
+
+       return ret;
 }
 
-MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, u32 phys_addr)
+MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, mali_dma_addr phys_addr)
 {
-       dma_pool_free(pool, virt_addr, phys_addr);
+       dma_pool_free(pool, virt_addr, (dma_addr_t)phys_addr);
 }
 
 
old mode 100644 (file)
new mode 100755 (executable)
index 112cea2..61752ad
@@ -26,7 +26,7 @@ int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get
 
        if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_api_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -36,6 +36,25 @@ int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get
        return 0;
 }
 
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+       _mali_uk_get_api_version_v2_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_api_version_v2(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+       if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+       return 0;
+}
+
 int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
 {
        _mali_uk_wait_for_notification_s kargs;
@@ -43,12 +62,12 @@ int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_wait_for_notification(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
        if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
-               kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+               kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */
                if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
        } else {
                if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
@@ -64,7 +83,7 @@ int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_p
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        if (0 != get_user(kargs.type, &uargs->type)) {
                return -EFAULT;
@@ -85,13 +104,13 @@ int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_g
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_user_settings(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       kargs.ctx = 0; /* prevent kernel address to be returned to user space */
        if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
 
        return 0;
@@ -104,10 +123,10 @@ int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_request_high_priority(&kargs);
 
-       kargs.ctx = NULL;
+       kargs.ctx = 0;
 
        return map_errcode(err);
 }
old mode 100644 (file)
new mode 100755 (executable)
index 573cabd..a8b5163
@@ -40,7 +40,7 @@ int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err =  _mali_ukk_get_gp_core_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -61,7 +61,7 @@ int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk
 
        if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_gp_suspend_response(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -79,7 +79,7 @@ int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_gp_number_of_cores(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
index 623e5aaaa2d452be80b6af74cb3afd8eb83b8579..6edf9daa76be93287ae6be11a7e6330906869c35 100755 (executable)
@@ -28,7 +28,7 @@ int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        /* Check if we can access the buffers */
        if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
@@ -68,7 +68,7 @@ int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_ext
                return -EFAULT;
        }
 
-       uk_args.ctx = session_data;
+       uk_args.ctx = (uintptr_t)session_data;
        err_code = _mali_ukk_map_external_mem(&uk_args);
 
        if (0 != put_user(uk_args.cookie, &argument->cookie)) {
@@ -76,7 +76,7 @@ int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_ext
                        /* Rollback */
                        _mali_uk_unmap_external_mem_s uk_args_unmap;
 
-                       uk_args_unmap.ctx = session_data;
+                       uk_args_unmap.ctx = (uintptr_t)session_data;
                        uk_args_unmap.cookie = uk_args.cookie;
                        err_code = _mali_ukk_unmap_external_mem(&uk_args_unmap);
                        if (_MALI_OSK_ERR_OK != err_code) {
@@ -104,7 +104,7 @@ int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap
                return -EFAULT;
        }
 
-       uk_args.ctx = session_data;
+       uk_args.ctx = (uintptr_t)session_data;
        err_code = _mali_ukk_unmap_external_mem(&uk_args);
 
        /* Return the error that _mali_ukk_free_big_block produced */
@@ -126,7 +126,7 @@ int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_rel
                return -EFAULT;
        }
 
-       uk_args.ctx = session_data;
+       uk_args.ctx = (uintptr_t)session_data;
        err_code = _mali_ukk_release_ump_mem(&uk_args);
 
        /* Return the error that _mali_ukk_free_big_block produced */
@@ -147,7 +147,7 @@ int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_atta
                return -EFAULT;
        }
 
-       uk_args.ctx = session_data;
+       uk_args.ctx = (uintptr_t)session_data;
        err_code = _mali_ukk_attach_ump_mem(&uk_args);
 
        if (0 != put_user(uk_args.cookie, &argument->cookie)) {
@@ -155,7 +155,7 @@ int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_atta
                        /* Rollback */
                        _mali_uk_release_ump_mem_s uk_args_unmap;
 
-                       uk_args_unmap.ctx = session_data;
+                       uk_args_unmap.ctx = (uintptr_t)session_data;
                        uk_args_unmap.cookie = uk_args.cookie;
                        err_code = _mali_ukk_release_ump_mem(&uk_args_unmap);
                        if (_MALI_OSK_ERR_OK != err_code) {
@@ -178,7 +178,7 @@ int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
@@ -192,31 +192,37 @@ int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mal
 {
        _mali_uk_dump_mmu_page_table_s kargs;
        _mali_osk_errcode_t err;
-       void *buffer;
+       void __user *user_buffer;
+       void *buffer = NULL;
        int rc = -EFAULT;
 
        /* validate input */
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        /* the session_data pointer was validated by caller */
 
-       kargs.buffer = NULL;
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+               goto err_exit;
 
-       /* get location of user buffer */
-       if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
-       /* get size of mmu page table info buffer from user space */
-       if (0 != get_user(kargs.size, &uargs->size)) goto err_exit;
-       /* verify we can access the whole of the user buffer */
-       if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+       user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+       if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+               goto err_exit;
 
        /* allocate temporary buffer (kernel side) to store mmu page table info */
-       MALI_CHECK(kargs.size > 0, -ENOMEM);
-       kargs.buffer = _mali_osk_valloc(kargs.size);
-       if (NULL == kargs.buffer) {
+       if (kargs.size <= 0)
+               return -EINVAL;
+       /* Allow at most 8 MiB buffers; this is more than enough to dump a
+        * fully populated page table. */
+       if (kargs.size > SZ_8M)
+               return -EINVAL;
+
+       buffer = (void *)(uintptr_t)_mali_osk_valloc(kargs.size);
+       if (NULL == buffer) {
                rc = -ENOMEM;
                goto err_exit;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.buffer = (uintptr_t)buffer;
        err = _mali_ukk_dump_mmu_page_table(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                rc = map_errcode(err);
@@ -224,14 +230,20 @@ int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mal
        }
 
        /* copy mmu page table info back to user space and update pointers */
-       if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size)) goto err_exit;
-       if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
-       if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
-       if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
-       if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+       if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+               goto err_exit;
+
+       kargs.register_writes = kargs.register_writes -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+       kargs.page_table_dump = kargs.page_table_dump -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+       if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+               goto err_exit;
+
        rc = 0;
 
 err_exit:
-       if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+       if (buffer) _mali_osk_vfree(buffer);
        return rc;
 }
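The rewritten mem_dump_mmu_page_table_wrapper above copies the whole argument block from user space, bounds the requested size, dumps into a kernel bounce buffer, and then rebases the two returned pointers, which point into the kernel buffer, onto the corresponding offsets inside the user buffer before copying the struct back. The rebasing arithmetic, shown on hypothetical names:

#include <linux/types.h>

/* Hypothetical rebase helper: 'kptr' points somewhere inside the kernel
 * bounce buffer 'kbuf'; return the address of the same offset inside the
 * user buffer 'ubuf', as a u64 suitable for the argument struct. */
static u64 example_rebase_to_user(u64 kptr, void *kbuf, void __user *ubuf)
{
	return kptr - (uintptr_t)kbuf + (uintptr_t)ubuf;
}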
index 9ed9acff97135b7b1e8cbcde7b65020b9d1e86ca..7053ff36a59b5b14cfcf55716f3e7a2477daf526 100755 (executable)
@@ -57,14 +57,14 @@ int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        err = _mali_ukk_get_pp_number_of_cores(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */
        if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
                return -EFAULT;
        }
@@ -80,7 +80,7 @@ int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_pp_core_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -98,7 +98,7 @@ int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_di
 
        if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        _mali_ukk_pp_job_disable_wb(&kargs);
 
        return 0;
index e9180322a180260140a158a6cde8d0d5764fa2c2..e341f633bda41e6d9adf376bfe6e68a619e2787c 100755 (executable)
 #include "mali_session.h"
 #include "mali_ukk_wrappers.h"
 
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
-{
-       _mali_uk_profiling_start_s kargs;
-       _mali_osk_errcode_t err;
-
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
-       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s))) {
-               return -EFAULT;
-       }
-
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_start(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
-
-       if (0 != put_user(kargs.limit, &uargs->limit)) {
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
 int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
 {
        _mali_uk_profiling_add_event_s kargs;
@@ -52,7 +28,7 @@ int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_profiling_add_event(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
@@ -61,68 +37,6 @@ int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk
        return 0;
 }
 
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
-{
-       _mali_uk_profiling_stop_s kargs;
-       _mali_osk_errcode_t err;
-
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_stop(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
-
-       if (0 != put_user(kargs.count, &uargs->count)) {
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
-{
-       _mali_uk_profiling_get_event_s kargs;
-       _mali_osk_errcode_t err;
-
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
-       if (0 != get_user(kargs.index, &uargs->index)) {
-               return -EFAULT;
-       }
-
-       kargs.ctx = session_data;
-
-       err = _mali_ukk_profiling_get_event(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
-
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
-       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s))) {
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
-{
-       _mali_uk_profiling_clear_s kargs;
-       _mali_osk_errcode_t err;
-
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_clear(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
-
-       return 0;
-}
-
 int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
 {
        _mali_osk_errcode_t err;
@@ -131,13 +45,13 @@ int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_profiling_memory_usage_get(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */
        if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
                return -EFAULT;
        }
@@ -150,6 +64,7 @@ int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data,
        _mali_uk_sw_counters_report_s kargs;
        _mali_osk_errcode_t err;
        u32 *counter_buffer;
+       u32 __user *counters;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
@@ -168,13 +83,15 @@ int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data,
                return -ENOMEM;
        }
 
-       if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters)) {
+       counters = (u32 __user *)(uintptr_t)kargs.counters;
+
+       if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
                kfree(counter_buffer);
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
-       kargs.counters = counter_buffer;
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.counters = (uintptr_t)counter_buffer;
 
        err = _mali_ukk_sw_counters_report(&kargs);
 
@@ -186,5 +103,3 @@ int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data,
 
        return 0;
 }
-
-
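
The counters handling above follows two related conventions: a user buffer address travels through the uk struct as a fixed-width integer and is cast back to a typed pointer before copy_from_user, and ctx is zeroed before the struct is copied back so no kernel address leaks to user space. A hedged userspace sketch of both steps, with placeholder struct and field names:

/* Illustrative only: carry a user pointer as a u64, cast it back for use,
 * and scrub the kernel context before handing the struct back. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct report_args {
	uint64_t ctx;          /* kernel session pointer while inside the kernel */
	uint64_t counters;     /* user buffer address, stored as an integer */
	uint32_t num_counters;
};

int main(void)
{
	uint32_t user_buf[3] = { 10, 20, 30 };
	struct report_args kargs = {
		.ctx = (uintptr_t)user_buf,     /* pretend kernel-side pointer */
		.counters = (uintptr_t)user_buf,
		.num_counters = 3,
	};
	struct report_args back;
	uint32_t *counters;

	/* kernel side: (u32 __user *)(uintptr_t)kargs.counters before copy_from_user() */
	counters = (uint32_t *)(uintptr_t)kargs.counters;
	printf("first counter: %u\n", counters[0]);

	kargs.ctx = 0;                          /* scrub before copying back */
	memcpy(&back, &kargs, sizeof(back));    /* stands in for copy_to_user() */
	printf("ctx returned to user: %llu\n", (unsigned long long)back.ctx);
	return 0;
}
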
index c5cb848694bf586e7ca0de07427c0e981f281ae0..c6b67c5f37f50de9f349c9e1c5d2e0dda0d9cc8c 100755 (executable)
@@ -21,8 +21,9 @@
 
 int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
 {
-       u32 type, user_job, point;
-       _mali_uk_fence_t uk_fence;
+       _mali_uk_soft_job_start_s kargs;
+       u32 type, point;
+       u64 user_job;
        struct mali_timeline_fence fence;
        struct mali_soft_job *job = NULL;
        u32 __user *job_id_ptr = NULL;
@@ -35,14 +36,17 @@ int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_
 
        MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
 
-       if (0 != get_user(type, &uargs->type))                 return -EFAULT;
-       if (0 != get_user(user_job, &uargs->user_job))         return -EFAULT;
-       if (0 != get_user(job_id_ptr, &uargs->job_id_ptr))     return -EFAULT;
+       if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+               return -EFAULT;
+       }
+
+       type = kargs.type;
+       user_job = kargs.user_job;
+       job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
 
-       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
-       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+       mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED < type) {
+       if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
                MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
                return -EINVAL;
        }
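
The rewritten soft_job_start_wrapper reads the whole argument struct with one copy_from_user and then accepts only the two known job types explicitly, rather than relying on a "less than the highest enum value" range check. A small userspace sketch of that validation style, with placeholder enum values and struct layout:

/* Illustrative only: copy the whole request once, then accept only the
 * explicitly supported job types. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum job_type { JOB_SELF_SIGNALED = 0, JOB_USER_SIGNALED = 1 }; /* placeholder values */

struct job_start {
	uint32_t type;
	uint64_t user_job;
};

static int start_job(const void *user_buf)
{
	struct job_start kargs;

	memcpy(&kargs, user_buf, sizeof(kargs));   /* stands in for copy_from_user() */

	if (kargs.type != JOB_SELF_SIGNALED && kargs.type != JOB_USER_SIGNALED) {
		fprintf(stderr, "invalid soft job type %u\n", (unsigned)kargs.type);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct job_start good = { .type = JOB_USER_SIGNALED, .user_job = 0x1000 };
	struct job_start bad  = { .type = 7, .user_job = 0x1000 };

	printf("good request: %d, bad request: %d\n", start_job(&good), start_job(&bad));
	return 0;
}
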
old mode 100644 (file)
new mode 100755 (executable)
index bf5f357..b0fa93b
@@ -28,7 +28,7 @@ int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_vsync_event_report(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
old mode 100644 (file)
new mode 100755 (executable)
index 793393c..c2ecf22
@@ -25,6 +25,7 @@ extern "C" {
 
 int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
 int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
 int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
 int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
 int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
@@ -56,11 +57,7 @@ int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
 int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
 int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
 
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
 int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
 int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
 int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
 
index d0dc4e5ae2f3670de93ad6006e9ad4f14125d0f4..f787f4e3b8922c44b76cdd5f35721f4ac29ed0cf 100755 (executable)
@@ -126,7 +126,6 @@ int get_mali_freq_level(int freq)
 {
        int mali_freq_num;
        int i = 0, level = -1;
-       int mali_freq_num;
 
        if(freq < 0)
                return level;
index aa7e43f173e9082f7c94c5624a60fc023316daef..398781dbebfee38807b1132566cb2b8888066a5b 100755 (executable)
@@ -167,12 +167,12 @@ _mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args)
        /* check compatability */
        if (args->version == UMP_IOCTL_API_VERSION) {
                DBG_MSG(3, ("API version set to newest %d (compatible)\n",
-                               GET_VERSION(args->version)));
+                           GET_VERSION(args->version)));
                args->compatible = 1;
                session_data->api_version = args->version;
        } else {
                DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
-                               GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+                           GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
                args->compatible = 0;
                args->version = UMP_IOCTL_API_VERSION; /* report our version */
        }
@@ -237,14 +237,14 @@ _mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction)
        if (NULL != mem) {
                user_interaction->size = mem->size_bytes;
                DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
-                               (ump_secure_id)user_interaction->secure_id,
-                               (unsigned long)user_interaction->size));
+                           (ump_secure_id)user_interaction->secure_id,
+                           (unsigned long)user_interaction->size));
                ump_random_mapping_put(mem);
                ret = _MALI_OSK_ERR_OK;
        } else {
                user_interaction->size = 0;
                DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
-                                       (ump_secure_id)user_interaction->secure_id));
+                           (ump_secure_id)user_interaction->secure_id));
        }
 
        return ret;
@@ -262,7 +262,7 @@ void _ump_ukk_msync(_ump_uk_msync_s *args)
        mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
                DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                return;
        }
 
@@ -363,7 +363,7 @@ void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
        mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
                DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                return;
        }
 
@@ -371,19 +371,19 @@ void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
        mem->hw_device = args->new_user;
 
        DBG_MSG(3, ("UMP[%02u] Switch usage  Start  New: %s  Prev: %s.\n",
-                               (ump_secure_id)args->secure_id,
-                               args->new_user ? "MALI" : "CPU",
-                               old_user ? "MALI" : "CPU"));
+                   (ump_secure_id)args->secure_id,
+                   args->new_user ? "MALI" : "CPU",
+                   old_user ? "MALI" : "CPU"));
 
        if (!mem->is_cached) {
                DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                goto out;
        }
 
        if (old_user == args->new_user) {
                DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                goto out;
        }
        if (
@@ -391,7 +391,7 @@ void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
                (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
        ) {
                DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                goto out;
        }
 
@@ -424,7 +424,7 @@ void _ump_ukk_lock(_ump_uk_lock_s *args)
        mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
                DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                return;
        }
 
@@ -442,12 +442,12 @@ void _ump_ukk_unlock(_ump_uk_unlock_s *args)
        mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
                DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
-                                       (ump_secure_id)args->secure_id));
+                           (ump_secure_id)args->secure_id));
                return;
        }
 
        DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n",
-                               (u32)args->secure_id, (u32) mem->lock_usage));
+                   (u32)args->secure_id, (u32) mem->lock_usage));
 
        mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
 
index e1d8b3a0275a31f832d8246df38d290b4c39b4dd..3a9dfe8664b8af91945a05f8e250d3fbc6c8f7a1 100755 (executable)
@@ -107,8 +107,8 @@ _mali_osk_errcode_t _ump_ukk_open(void **context)
        }
 
        session_data->cookies_map = ump_descriptor_mapping_create(
-                               UMP_COOKIES_PER_SESSION_INITIAL,
-                               UMP_COOKIES_PER_SESSION_MAXIMUM);
+                                           UMP_COOKIES_PER_SESSION_INITIAL,
+                                           UMP_COOKIES_PER_SESSION_MAXIMUM);
 
        if (NULL == session_data->cookies_map) {
                MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
index 3898218317a138563b75985b56727cd10874a1bf..aa08c17a917f59ff8bea8985874447b1f8afbc31 100755 (executable)
@@ -145,8 +145,8 @@ _mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
        /* Now, ask the active memory backend to do the actual memory allocation */
        if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
                DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
-                                       new_allocation->size_bytes,
-                                       (unsigned long)user_interaction->size));
+                           new_allocation->size_bytes,
+                           (unsigned long)user_interaction->size));
                _mali_osk_free(new_allocation);
                _mali_osk_free(session_memory_element);
                return _MALI_OSK_ERR_INVALID_FUNC;
@@ -174,8 +174,8 @@ _mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
        user_interaction->secure_id = new_allocation->secure_id;
        user_interaction->size = new_allocation->size_bytes;
        DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
-                               new_allocation->secure_id,
-                               new_allocation->size_bytes));
+                   new_allocation->secure_id,
+                   new_allocation->size_bytes));
 
        return _MALI_OSK_ERR_OK;
 }
index b66e04af3acc6d50be71cd9412460f47dee2be47..95bcb95602a0e893c33ad2849ffbc73637bb7af1 100755 (executable)
@@ -74,7 +74,7 @@ ump_random_mapping *ump_random_mapping_create(void)
                return NULL;
 
        map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED,
-                       _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+                                           _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
        if (NULL != map->lock) {
                map->root = RB_ROOT;
 #if UMP_RANDOM_MAP_DELAY
@@ -135,8 +135,7 @@ ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id)
                map->failed.count++;
 
                if (time_is_before_jiffies(map->failed.timestamp +
-                               UMP_FAILED_LOOKUP_DELAY * HZ))
-               {
+                                          UMP_FAILED_LOOKUP_DELAY * HZ)) {
                        /* If it is a long time since last failure, reset
                         * the counter and skip the delay this time. */
                        map->failed.count = 0;
@@ -182,7 +181,7 @@ void ump_random_mapping_put(ump_dd_mem *mem)
 
        new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
        DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n",
-                               mem->secure_id, new_ref));
+                   mem->secure_id, new_ref));
 
        if (0 == new_ref) {
                DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
index 12d6cc9d34a2df7dfff03a5e753eff190b219c5f..825f16fa86170725afa4fd3644e2becf9153086a 100755 (executable)
@@ -23,7 +23,7 @@
 
 typedef struct lock_cmd_priv {
        uint32_t msg[128];    /*ioctl args*/
-       u32 pid;                          /*process id*/
+       u32 pid;              /*process id*/
 } _lock_cmd_priv;
 
 typedef struct lock_ref {
@@ -34,6 +34,7 @@ typedef struct lock_ref {
 typedef struct umplock_item {
        u32 secure_id;
        u32 id_ref_count;
+       u32 owner;
        _lock_access_usage usage;
        _lock_ref references[MAX_PIDS];
        struct semaphore item_lock;
@@ -51,122 +52,25 @@ struct umplock_device {
        struct class *umplock_class;
 };
 
-static char umplock_dev_name[] = "umplock";
-
-int umplock_major = 0;
-module_param(umplock_major, int, S_IRUGO); /* r--r--r-- */
-MODULE_PARM_DESC(umplock_major, "Device major number");
-
-static int  umplock_driver_open(struct inode *inode, struct file *filp);
-static int  umplock_driver_release(struct inode *inode, struct file *filp);
-static long umplock_driver_ioctl(struct file *f, unsigned int cmd, unsigned long arg);
-
-static struct file_operations umplock_fops = {
-       .owner   = THIS_MODULE,
-       .open    = umplock_driver_open,
-       .release = umplock_driver_release,
-       .unlocked_ioctl = umplock_driver_ioctl,
-};
-
 static struct umplock_device umplock_device;
 static umplock_device_private device;
+static dev_t umplock_dev;
+static char umplock_dev_name[] = "umplock";
 
-void umplock_init_locklist(void)
-{
-       memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
-       atomic_set(&device.sessions, 0);
-}
-
-void umplock_deinit_locklist(void)
-{
-       memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
-}
-
-int umplock_device_initialize(void)
-{
-       int err;
-       dev_t dev = 0;
-
-       if (0 == umplock_major) {
-               err = alloc_chrdev_region(&dev, 0, 1, umplock_dev_name);
-               umplock_major = MAJOR(dev);
-       } else {
-               dev = MKDEV(umplock_major, 0);
-               err = register_chrdev_region(dev, 1, umplock_dev_name);
-       }
-
-       if (0 == err) {
-               memset(&umplock_device, 0, sizeof(umplock_device));
-               cdev_init(&umplock_device.cdev, &umplock_fops);
-               umplock_device.cdev.owner = THIS_MODULE;
-               umplock_device.cdev.ops = &umplock_fops;
-
-               err = cdev_add(&umplock_device.cdev, dev, 1);
-               if (0 == err) {
-                       umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
-                       if (IS_ERR(umplock_device.umplock_class)) {
-                               err = PTR_ERR(umplock_device.umplock_class);
-                       } else {
-                               struct device *mdev;
-                               mdev = device_create(umplock_device.umplock_class, NULL, dev, NULL, umplock_dev_name);
-                               if (!IS_ERR(mdev)) {
-                                       return 0; /* all ok */
-                               }
-
-                               err = PTR_ERR(mdev);
-                               class_destroy(umplock_device.umplock_class);
-                       }
-                       cdev_del(&umplock_device.cdev);
-               }
-
-               unregister_chrdev_region(dev, 1);
-       }
-
-       return 1;
-}
-
-void umplock_device_terminate(void)
-{
-       dev_t dev = MKDEV(umplock_major, 0);
-
-       device_destroy(umplock_device.umplock_class, dev);
-       class_destroy(umplock_device.umplock_class);
-
-       cdev_del(&umplock_device.cdev);
-       unregister_chrdev_region(dev, 1);
-}
-
-int umplock_constructor(void)
-{
-       mutex_init(&device.item_list_lock);
-       if (!umplock_device_initialize()) return 1;
-       umplock_init_locklist();
-
-       return 0;
-}
+int umplock_debug_level = 0;
+module_param(umplock_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(umplock_debug_level, "set umplock_debug_level to print debug messages");
 
-void umplock_destructor(void)
-{
-       umplock_deinit_locklist();
-       umplock_device_terminate();
-       mutex_destroy(&device.item_list_lock);
-}
+#define PDEBUG(level, fmt, args...) do { if ((level) <= umplock_debug_level) printk(KERN_DEBUG "umplock: " fmt, ##args); } while (0)
+#define PERROR(fmt, args...) do { printk(KERN_ERR "umplock: " fmt, ##args); } while (0)
 
 int umplock_find_item(u32 secure_id)
 {
        int i;
        for (i = 0; i < MAX_ITEMS; i++) {
-               if (device.items[i].secure_id == secure_id) return i;
-       }
-
-       return -1;
-}
-
-int umplock_find_slot(void)
-{
-       int i;
-       for (i = 0; i < MAX_ITEMS; i++) {
-               if (device.items[i].secure_id == 0) return i;
+               if (device.items[i].secure_id == secure_id) {
+                       return i;
+               }
        }
 
        return -1;
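
The new PDEBUG macro gates its output on the umplock_debug_level module parameter, so lock tracing can be switched on at runtime without rebuilding the module. A userspace analogue of the same level-gated macro, with fprintf standing in for printk and a plain int standing in for the module parameter:

/* Illustrative only: level-gated debug macro in the style of PDEBUG above. */
#include <stdio.h>

static int debug_level = 0;   /* in the driver this is the module parameter */

#define PDEBUG(level, fmt, ...) \
	do { if ((level) <= debug_level) fprintf(stderr, "umplock: " fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	PDEBUG(1, "suppressed at level %d\n", debug_level);  /* not printed */
	debug_level = 1;
	PDEBUG(1, "printed at level %d\n", debug_level);
	return 0;
}
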
@@ -181,8 +85,9 @@ static int umplock_find_item_by_pid(_lock_cmd_priv *lock_cmd, int *item_slot, in
 
        i = umplock_find_item(lock_item->secure_id);
 
-       if (i < 0)
+       if (i < 0) {
                return -1;
+       }
 
        for (j = 0; j < MAX_PIDS; j++) {
                if (device.items[i].references[j].pid == lock_cmd->pid) {
@@ -198,11 +103,14 @@ static int umplock_find_client_valid(u32 pid)
 {
        int i;
 
-       if (pid == 0)
+       if (pid == 0) {
                return -1;
+       }
 
        for (i = 0; i < MAX_PIDS; i++) {
-               if (device.pids[i] == pid) return i;
+               if (device.pids[i] == pid) {
+                       return i;
+               }
        }
 
        return -1;
@@ -216,44 +124,41 @@ static int do_umplock_create_locked(_lock_cmd_priv *lock_cmd)
 
        i_index = ref_index = -1;
 
-#if 0
-       if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK: C 0x%x GPU SURFACE\n", lock_item->secure_id);
-       else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK: C 0x%x GPU TEXTURE\n", lock_item->secure_id);
-       else printk(KERN_DEBUG "UMPLOCK: C 0x%x CPU\n", lock_item->secure_id);
-#endif
-
        ret = umplock_find_client_valid(lock_cmd->pid);
        if (ret < 0) {
                /*lock request from an invalid client pid, do nothing*/
-               return 0;
+               return -EINVAL;
        }
 
        ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
        if (ret >= 0) {
-               if (device.items[i_index].references[ref_index].ref_count == 0)
-                       device.items[i_index].references[ref_index].ref_count = 1;
        } else if ((i_index = umplock_find_item(lock_item->secure_id)) >= 0) {
                for (ref_index = 0; ref_index < MAX_PIDS; ref_index++) {
-                       if (device.items[i_index].references[ref_index].pid == 0) break;
+                       if (device.items[i_index].references[ref_index].pid == 0) {
+                               break;
+                       }
                }
                if (ref_index < MAX_PIDS) {
                        device.items[i_index].references[ref_index].pid = lock_cmd->pid;
-                       device.items[i_index].references[ref_index].ref_count = 1;
+                       device.items[i_index].references[ref_index].ref_count = 0;
                } else {
-                       printk(KERN_ERR "UMPLOCK: whoops, item ran out of available reference slot\n");
+                       PERROR("whoops, item ran out of available reference slots\n");
+                       return -EINVAL;
+
                }
        } else {
-               i_index = umplock_find_slot();
+               i_index = umplock_find_item(0);
 
                if (i_index >= 0) {
                        device.items[i_index].secure_id = lock_item->secure_id;
-                       device.items[i_index].id_ref_count = 1;
+                       device.items[i_index].id_ref_count = 0;
                        device.items[i_index].usage = lock_item->usage;
                        device.items[i_index].references[0].pid = lock_cmd->pid;
-                       device.items[i_index].references[0].ref_count = 1;
+                       device.items[i_index].references[0].ref_count = 0;
                        sema_init(&device.items[i_index].item_lock, 1);
                } else {
-                       printk(KERN_ERR "UMPLOCK: whoops, ran out of available slots\n");
+                       PERROR("whoops, ran out of available slots\n");
+                       return -EINVAL;
                }
        }
 
@@ -263,119 +168,130 @@ static int do_umplock_create_locked(_lock_cmd_priv *lock_cmd)
 
 static int do_umplock_create(_lock_cmd_priv *lock_cmd)
 {
-       int ret = 0;
-       mutex_lock(&device.item_list_lock);
-       ret = do_umplock_create_locked(lock_cmd);
-       mutex_unlock(&device.item_list_lock);
-       return ret;
+       return 0;
 }
 
 static int do_umplock_process(_lock_cmd_priv *lock_cmd)
 {
-       int ret, i_index, ref_index, ref_count;
+       int ret, i_index, ref_index;
+       _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
 
        mutex_lock(&device.item_list_lock);
 
-       do_umplock_create_locked(lock_cmd);
+       if (0 == lock_item->secure_id) {
+               PERROR("IOCTL_UMPLOCK_PROCESS called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+               mutex_unlock(&device.item_list_lock);
+               return -EINVAL;
+       }
 
-       ret = umplock_find_client_valid(lock_cmd->pid);
+       ret = do_umplock_create_locked(lock_cmd);
        if (ret < 0) {
-               /*lock request from an invalid client pid, do nothing*/
                mutex_unlock(&device.item_list_lock);
-               return 0;
+               return -EINVAL;
        }
 
        ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
-       ref_count = device.items[i_index].references[ref_index].ref_count;
-       if (ret >= 0) {
-               if (ref_count == 1) {
-                       /*add ref before down to wait for the umplock*/
-                       device.items[i_index].references[ref_index].ref_count++;
-                       device.items[i_index].id_ref_count++;
-                       mutex_unlock(&device.item_list_lock);
-                       if (down_interruptible(&device.items[i_index].item_lock)) {
-                               /*wait up without hold the umplock. restore previous state and return*/
-                               mutex_lock(&device.item_list_lock);
-                               device.items[i_index].references[ref_index].ref_count--;
-                               device.items[i_index].id_ref_count--;
-                               if (device.items[i_index].references[ref_index].ref_count == 1) {
-                                       device.items[i_index].references[ref_index].ref_count = 0;
-                                       device.items[i_index].references[ref_index].pid = 0;
-                                       if (device.items[i_index].id_ref_count == 1) {
-                                               device.items[i_index].id_ref_count = 0;
-                                               device.items[i_index].secure_id = 0;
-                                       }
-                               }
-                               mutex_unlock(&device.item_list_lock);
-                               return -ERESTARTSYS;
-                       }
-                       mutex_lock(&device.item_list_lock);
-               } else {
-                       /*already got the umplock, add ref*/
-                       device.items[i_index].references[ref_index].ref_count++;
-                       device.items[i_index].id_ref_count++;
-               }
-#if 0
-               if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK:  P 0x%x GPU SURFACE\n", lock_item->secure_id);
-               else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK:  P 0x%x GPU TEXTURE\n", lock_item->secure_id);
-               else printk(KERN_DEBUG "UMPLOCK:  P 0x%x CPU\n", lock_item->secure_id);
-#endif
-       } else {
+       if (ret < 0) {
                /*fail to find a item*/
-               printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_PROCESS called with invalid parameter\n");
+               PERROR("IOCTL_UMPLOCK_PROCESS called with invalid parameter, pid: %d\n", lock_cmd->pid);
                mutex_unlock(&device.item_list_lock);
                return -EINVAL;
        }
+       device.items[i_index].references[ref_index].ref_count++;
+       device.items[i_index].id_ref_count++;
+       PDEBUG(1, "try to lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+       if (lock_cmd->pid == device.items[i_index].owner) {
+               PDEBUG(1, "already own the lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+               mutex_unlock(&device.item_list_lock);
+               return 0;
+       }
+
+       mutex_unlock(&device.item_list_lock);
+       if (down_interruptible(&device.items[i_index].item_lock)) {
+               /*wait up without hold the umplock. restore previous state and return*/
+               mutex_lock(&device.item_list_lock);
+               device.items[i_index].references[ref_index].ref_count--;
+               device.items[i_index].id_ref_count--;
+               if (0 == device.items[i_index].references[ref_index].ref_count) {
+                       device.items[i_index].references[ref_index].pid = 0;
+                       if (0 == device.items[i_index].id_ref_count) {
+                               PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+                               device.items[i_index].secure_id = 0;
+                       }
+               }
+
+               PERROR("failed lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+               mutex_unlock(&device.item_list_lock);
+               return -ERESTARTSYS;
+       }
+
+       mutex_lock(&device.item_list_lock);
+       PDEBUG(1, "got lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+       device.items[i_index].owner = lock_cmd->pid;
        mutex_unlock(&device.item_list_lock);
+
        return 0;
 }
 
 static int do_umplock_release(_lock_cmd_priv *lock_cmd)
 {
-       int i_index, ref_index, ref_count;
-       int ret;
+       int ret, i_index, ref_index;
+       _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
 
        mutex_lock(&device.item_list_lock);
+
+       if (0 == lock_item->secure_id) {
+               PERROR("IOCTL_UMPLOCK_RELEASE called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+               mutex_unlock(&device.item_list_lock);
+               return -EINVAL;
+       }
+
        ret = umplock_find_client_valid(lock_cmd->pid);
        if (ret < 0) {
                /*lock request from an invalid client pid, do nothing*/
                mutex_unlock(&device.item_list_lock);
-               return 0;
+               return -EPERM;
        }
 
        i_index = ref_index = -1;
 
        ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+       if (ret < 0) {
+               /*fail to find item*/
+               PERROR("IOCTL_UMPLOCK_RELEASE called with invalid parameter pid: %d, secid: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+               mutex_unlock(&device.item_list_lock);
+               return -EINVAL;
+       }
 
-       if (ret >= 0) {
-               device.items[i_index].references[ref_index].ref_count--;
-               ref_count = device.items[i_index].references[ref_index].ref_count;
-               device.items[i_index].id_ref_count--;
+       /* if the lock is not owned by this process */
+       if (lock_cmd->pid != device.items[i_index].owner) {
+               mutex_unlock(&device.item_list_lock);
+               return -EPERM;
+       }
 
-#if 0
-               if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK:   R 0x%x GPU SURFACE\n", lock_item->secure_id);
-               else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK:   R 0x%x GPU TEXTURE\n", lock_item->secure_id);
-               else printk(KERN_DEBUG "UMPLOCK:   R 0x%x CPU\n", lock_item->secure_id);
-#endif
-               /*reached the last reference to the umplock*/
-               if (ref_count == 1) {
-                       /*release the umplock*/
-                       up(&device.items[i_index].item_lock);
+       /* if the ref_count is 0, that means nothing to unlock, just return */
+       if (0 == device.items[i_index].references[ref_index].ref_count) {
+               mutex_unlock(&device.item_list_lock);
+               return 0;
+       }
 
-                       device.items[i_index].references[ref_index].ref_count = 0;
-                       device.items[i_index].references[ref_index].pid = 0;
-                       if (device.items[i_index].id_ref_count == 1) {
-                               device.items[i_index].id_ref_count = 0;
-                               device.items[i_index].secure_id = 0;
-                       }
+       device.items[i_index].references[ref_index].ref_count--;
+       device.items[i_index].id_ref_count--;
+       PDEBUG(1, "unlock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+       if (0 == device.items[i_index].references[ref_index].ref_count) {
+               device.items[i_index].references[ref_index].pid = 0;
+               if (0 == device.items[i_index].id_ref_count) {
+                       PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+                       device.items[i_index].secure_id = 0;
                }
-       } else {
-               /*fail to find item*/
-               printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_RELEASE called with invalid parameter pid : %d tgid :%d  secid: %d  \n", lock_cmd->pid, current->tgid, ((_lock_item_s *)&lock_cmd->msg)->secure_id);
-               mutex_unlock(&device.item_list_lock);
-               return -EINVAL;
+               device.items[i_index].owner = 0;
+               up(&device.items[i_index].item_lock);
        }
        mutex_unlock(&device.item_list_lock);
+
        return 0;
 }
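
The reworked process/release paths track an owner pid: a repeated IOCTL_UMPLOCK_PROCESS from the current owner only bumps the reference count instead of blocking on the semaphore it already holds, and release only ups the semaphore once that caller's count drops back to zero. A condensed, single-threaded sketch of that state machine, with an int standing in for the semaphore and simplified field names:

/* Illustrative only: recursive-by-owner lock with a per-caller reference
 * count, mirroring do_umplock_process/do_umplock_release above. */
#include <stdio.h>

struct lock_item {
	unsigned owner;    /* pid holding the semaphore, 0 if free */
	int ref_count;     /* this caller's outstanding locks */
	int sem;           /* 1 = free, 0 = held (stands in for item_lock) */
};

static int lock_process(struct lock_item *it, unsigned pid)
{
	it->ref_count++;
	if (it->owner == pid)
		return 0;      /* already own it: just count the nesting */
	if (!it->sem)
		return -1;     /* would block; the real code sleeps here */
	it->sem = 0;
	it->owner = pid;
	return 0;
}

static void lock_release(struct lock_item *it, unsigned pid)
{
	if (it->owner != pid || it->ref_count == 0)
		return;
	if (--it->ref_count == 0) {
		it->owner = 0;
		it->sem = 1;   /* up(&item_lock) in the driver */
	}
}

int main(void)
{
	struct lock_item it = { .sem = 1 };

	lock_process(&it, 100);
	lock_process(&it, 100);            /* nested lock by the owner */
	lock_release(&it, 100);
	lock_release(&it, 100);
	printf("owner=%u sem=%d\n", it.owner, it.sem);  /* owner=0 sem=1 */
	return 0;
}
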
 
@@ -383,20 +299,21 @@ static int do_umplock_zap(void)
 {
        int i;
 
-       printk(KERN_DEBUG "UMPLOCK: ZAP ALL ENTRIES!\n");
+       PDEBUG(1, "ZAP ALL ENTRIES!\n");
 
        mutex_lock(&device.item_list_lock);
 
        for (i = 0; i < MAX_ITEMS; i++) {
                device.items[i].secure_id = 0;
-               memset(&device.items[i].references, 0, sizeof(_lock_ref)*MAX_PIDS);
+               memset(&device.items[i].references, 0, sizeof(_lock_ref) * MAX_PIDS);
                sema_init(&device.items[i].item_lock, 1);
        }
-       mutex_unlock(&device.item_list_lock);
 
        for (i = 0; i < MAX_PIDS; i++) {
                device.pids[i] = 0;
        }
+       mutex_unlock(&device.item_list_lock);
+
        return 0;
 }
 
@@ -404,21 +321,22 @@ static int do_umplock_dump(void)
 {
        int i, j;
 
-       printk("dump all the items\n");
-
        mutex_lock(&device.item_list_lock);
+       PERROR("dump all the items begin\n");
        for (i = 0; i < MAX_ITEMS; i++) {
                for (j = 0; j < MAX_PIDS; j++) {
                        if (device.items[i].secure_id != 0 && device.items[i].references[j].pid != 0) {
-                               printk("item[%d]->secure_id=%d\t reference[%d].ref_count=%d.pid=%d\n",
+                               PERROR("item[%d]->secure_id=0x%x, owner=%d\t reference[%d].ref_count=%d.pid=%d\n",
                                       i,
                                       device.items[i].secure_id,
+                                      device.items[i].owner,
                                       j,
                                       device.items[i].references[j].ref_count,
                                       device.items[i].references[j].pid);
                        }
                }
        }
+       PERROR("dump all the items end\n");
        mutex_unlock(&device.item_list_lock);
 
        return 0;
@@ -430,6 +348,7 @@ int do_umplock_client_add(_lock_cmd_priv *lock_cmd)
        mutex_lock(&device.item_list_lock);
        for (i = 0; i < MAX_PIDS; i++) {
                if (device.pids[i] == lock_cmd->pid) {
+                       mutex_unlock(&device.item_list_lock);
                        return 0;
                }
        }
@@ -441,7 +360,8 @@ int do_umplock_client_add(_lock_cmd_priv *lock_cmd)
        }
        mutex_unlock(&device.item_list_lock);
        if (i == MAX_PIDS) {
-               printk(KERN_ERR "Oops, Run out of cient slots\n ");
+               PERROR("Oops, Run out of client slots\n ");
+               return -EINVAL;
        }
        return 0;
 }
@@ -464,6 +384,7 @@ int do_umplock_client_delete(_lock_cmd_priv *lock_cmd)
        /*walk through umplock item list and release reference attached to this client*/
        for (i_index = 0; i_index < MAX_ITEMS; i_index++) {
                lock_item->secure_id = device.items[i_index].secure_id;
+
                /*find the item index and reference slot for the lock_item*/
                ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
 
@@ -473,6 +394,9 @@ int do_umplock_client_delete(_lock_cmd_priv *lock_cmd)
                }
                while (device.items[i_index].references[ref_index].ref_count) {
                        /*release references on this client*/
+
+                       PDEBUG(1, "delete client, pid: %d, ref_count: %d\n", lock_cmd->pid, device.items[i_index].references[ref_index].ref_count);
+
                        mutex_unlock(&device.item_list_lock);
                        do_umplock_release(lock_cmd);
                        mutex_lock(&device.item_list_lock);
@@ -559,7 +483,7 @@ static int umplock_driver_open(struct inode *inode, struct file *filp)
        _lock_cmd_priv lock_cmd;
 
        atomic_inc(&device.sessions);
-       printk(KERN_DEBUG "UMPLOCK: OPEN SESSION (%i references)\n", atomic_read(&device.sessions));
+       PDEBUG(1, "OPEN SESSION (%i references)\n", atomic_read(&device.sessions));
 
        lock_cmd.pid = (u32)current->tgid;
        do_umplock_client_add(&lock_cmd);
@@ -569,39 +493,106 @@ static int umplock_driver_open(struct inode *inode, struct file *filp)
 
 static int umplock_driver_release(struct inode *inode, struct file *filp)
 {
+       int sessions = 0;
        _lock_cmd_priv lock_cmd;
 
        lock_cmd.pid = (u32)current->tgid;
        do_umplock_client_delete(&lock_cmd);
 
+       mutex_lock(&device.item_list_lock);
        atomic_dec(&device.sessions);
-       printk(KERN_DEBUG "UMPLOCK: CLOSE SESSION (%i references)\n", atomic_read(&device.sessions));
-       if (atomic_read(&device.sessions) == 0) {
+       sessions = atomic_read(&device.sessions);
+       PDEBUG(1, "CLOSE SESSION (%i references)\n", sessions);
+       mutex_unlock(&device.item_list_lock);
+       if (sessions == 0) {
                do_umplock_zap();
        }
 
        return 0;
 }
 
+static struct file_operations umplock_fops = {
+       .owner   = THIS_MODULE,
+       .open    = umplock_driver_open,
+       .release = umplock_driver_release,
+       .unlocked_ioctl = umplock_driver_ioctl,
+};
+
+int umplock_device_initialize(void)
+{
+       int err;
+
+       err = alloc_chrdev_region(&umplock_dev, 0, 1, umplock_dev_name);
+
+       if (0 == err) {
+               memset(&umplock_device, 0, sizeof(umplock_device));
+               cdev_init(&umplock_device.cdev, &umplock_fops);
+               umplock_device.cdev.owner = THIS_MODULE;
+               umplock_device.cdev.ops = &umplock_fops;
+
+               err = cdev_add(&umplock_device.cdev, umplock_dev, 1);
+               if (0 == err) {
+                       umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
+                       if (IS_ERR(umplock_device.umplock_class)) {
+                               err = PTR_ERR(umplock_device.umplock_class);
+                       } else {
+                               struct device *mdev;
+                               mdev = device_create(umplock_device.umplock_class, NULL, umplock_dev, NULL, umplock_dev_name);
+                               if (!IS_ERR(mdev)) {
+                                       return 0; /* all ok */
+                               }
+
+                               err = PTR_ERR(mdev);
+                               class_destroy(umplock_device.umplock_class);
+                       }
+                       cdev_del(&umplock_device.cdev);
+               }
+
+               unregister_chrdev_region(umplock_dev, 1);
+       } else {
+               PERROR("alloc chardev region failed\n");
+       }
+
+       return err;
+}
+
+void umplock_device_terminate(void)
+{
+       device_destroy(umplock_device.umplock_class, umplock_dev);
+       class_destroy(umplock_device.umplock_class);
+
+       cdev_del(&umplock_device.cdev);
+       unregister_chrdev_region(umplock_dev, 1);
+}
+
 static int __init umplock_initialize_module(void)
 {
-       printk(KERN_DEBUG "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__);
+       PDEBUG(1, "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__);
 
-       if (!umplock_constructor()) {
-               printk(KERN_ERR "UMP lock device driver init failed\n");
+       mutex_init(&device.item_list_lock);
+       if (umplock_device_initialize() != 0) {
+               PERROR("UMP lock device driver init failed\n");
                return -ENOTTY;
        }
+       memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+       memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+       atomic_set(&device.sessions, 0);
 
-       printk(KERN_DEBUG "UMP lock device driver loaded\n");
+       PDEBUG(1, "UMP lock device driver loaded\n");
 
        return 0;
 }
 
 static void __exit umplock_cleanup_module(void)
 {
-       printk(KERN_DEBUG "unloading UMP lock module\n");
-       umplock_destructor();
-       printk(KERN_DEBUG "UMP lock module unloaded\n");
+       PDEBUG(1, "unloading UMP lock module\n");
+
+       memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+       memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+       umplock_device_terminate();
+       mutex_destroy(&device.item_list_lock);
+
+       PDEBUG(1, "UMP lock module unloaded\n");
 }
 
 module_init(umplock_initialize_module);