*/
#define MALI_DLBU_SIZE 0x400
-u32 mali_dlbu_phys_addr = 0;
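+/* mali_dma_addr abstracts the platform DMA address type, which may be wider
+ * than u32 (e.g. on LPAE or 64-bit systems), so DLBU physical addresses are
+ * no longer truncated here. */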
+mali_dma_addr mali_dlbu_phys_addr = 0;
static mali_io_address mali_dlbu_cpu_addr = NULL;
/**
struct mali_pp_job;
struct mali_group;
-
-extern u32 mali_dlbu_phys_addr;
-
struct mali_dlbu_core;
+extern mali_dma_addr mali_dlbu_phys_addr;
+
_mali_osk_errcode_t mali_dlbu_initialize(void);
void mali_dlbu_terminate(void);
typedef struct mali_dma_cmd_buf {
u32 *virt_addr; /**< CPU address of command buffer */
- u32 phys_addr; /**< Physical address of command buffer */
+ mali_dma_addr phys_addr; /**< Physical address of command buffer */
u32 size; /**< Number of prepared words in command buffer */
} mali_dma_cmd_buf;
return (NULL == job) ? 0 : job->cache_order;
}
-MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
{
return job->uargs.user_job_ptr;
}
return _MALI_OSK_ERR_NOMEM;
}
- timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+ timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
point = mali_gp_scheduler_submit_job(session, job);
_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
args->number_of_cores = 1;
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)(uintptr_t)args->ctx);
+
args->version = gp_version;
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
{
- struct mali_session_data *session;
struct mali_gp_job *resumed_job;
_mali_osk_notification_t *new_notification = NULL;
MALI_DEBUG_ASSERT_POINTER(args);
- if (NULL == args->ctx) {
- return _MALI_OSK_ERR_INVALID_ARGS;
- }
-
- session = (struct mali_session_data *)args->ctx;
- if (NULL == session) {
- return _MALI_OSK_ERR_FAULT;
- }
-
if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
* remove it from broadcast
*/
mali_bcast_remove_group(group->bcast_core, child);
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %0x remove from bcast_core\n", (int)child));
+		MALI_DEBUG_PRINT(4, ("Mali Virtual Group: remaining PP group %p removed from bcast_core\n", child));
}
}
MALI_SUCCESS;
}
#endif
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
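+	/* With a shared IRQ line the interrupt may not be ours; bail out if
+	 * this group's PM domain state cannot be locked (e.g. it is powering
+	 * down), since the MMU registers may then be inaccessible. */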
+ if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
+ goto out;
+ }
+#endif
/* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
int_stat = mali_mmu_get_int_status(mmu);
/* An actual page fault has occurred. */
#ifdef DEBUG
u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
- MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
- (void *)fault_address,
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%08x from bus id %d of type %s on %s\n",
+ fault_address,
(status >> 6) & 0x1F,
(status & 32) ? "write" : "read",
mmu->hw_core.description));
+ mali_mmu_pagedir_diag(group->session->page_directory, fault_address);
#endif
mali_group_mmu_page_fault_and_unlock(group);
_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	/* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
	/* check compatibility */
if (args->version == _MALI_UK_API_VERSION) {
_mali_osk_errcode_t err;
_mali_osk_notification_t *notification;
_mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
/* check input */
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
- queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
/* if the queue does not exist we're currently shutting down */
if (NULL == queue) {
{
_mali_osk_notification_t *notification;
_mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
/* check input */
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
- queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
/* if the queue does not exist we're currently shutting down */
if (NULL == queue) {
struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
- session = (struct mali_session_data *) args->ctx;
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
if (!session->use_high_priority_job_queue) {
session->use_high_priority_job_queue = MALI_TRUE;
/* page fault queue flush helper pages
* note that the mapping pointers are currently unused outside of the initialization functions */
-static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
-static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
-static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
-static u32 mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
static mali_io_address mali_empty_page_directory_virt = NULL;
mali_empty_page_directory_virt = NULL;
/* Free the page fault flush pages */
- mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping,
- &mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping,
- &mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+ mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping);
}
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
- mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
- if (NULL != mmu)
- {
+ mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+ if (NULL != mmu) {
if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
if (is_virtual) {
#include "mali_mmu_page_directory.h"
#include "mali_hw_core.h"
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <mach/am_regs.h>
-#include <linux/module.h>
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-#include "meson_m400/mali_fix.h"
-#endif
-
-
/* Forward declaration from mali_group.h */
struct mali_group;
{
_mali_osk_errcode_t err;
mali_io_address mapping;
- u32 address;
+ mali_dma_addr address;
if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
/* Allocation failed */
return address;
}
-void mali_free_empty_page(u32 address, mali_io_address virt_addr)
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
{
if (MALI_INVALID_PAGE != address) {
mali_mmu_release_table_page(address, virt_addr);
}
}
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
- u32 *page_table, mali_io_address *page_table_mapping,
- u32 *data_page, mali_io_address *data_page_mapping)
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
{
_mali_osk_errcode_t err;
return err;
}
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
- u32 *page_table, mali_io_address *page_table_mapping,
- u32 *data_page, mali_io_address *data_page_mapping)
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
{
if (MALI_INVALID_PAGE != *page_directory) {
mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
_mali_osk_errcode_t err;
mali_io_address pde_mapping;
- u32 pde_phys;
+ mali_dma_addr pde_phys;
int i;
- if (last_pde < first_pde) {
- MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
- }
+ if (last_pde < first_pde)
+ return _MALI_OSK_ERR_INVALID_ARGS;
for (i = first_pde; i <= last_pde; i++) {
- if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+ if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
/* Page table not present */
MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
}
_mali_osk_write_mem_barrier();
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;
}
MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
}
}
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
{
const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
struct mali_page_directory *mali_mmu_pagedir_alloc(void)
{
struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ mali_dma_addr phys;
pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
if (NULL == pagedir) {
return NULL;
}
- if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
+ err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+ if (_MALI_OSK_ERR_OK != err) {
_mali_osk_free(pagedir);
return NULL;
}
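+	/* Assumption: MMU table pages are allocated below 4 GB (the Mali MMU
+	 * holds 32-bit physical addresses), which makes the u32 cast safe. */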
+ pagedir->page_directory = (u32)phys;
+
/* Zero page directory */
fill_page(pagedir->page_directory_mapped, 0);
/* Free referenced page tables and zero PDEs. */
for (i = 0; i < num_page_table_entries; i++) {
- if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
- u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+ pagedir->page_directory_mapped,
+ sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+ mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
_mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
}
}
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits)
{
u32 end_address = mali_address + size;
+ u32 mali_phys = (u32)phys_address;
/* Map physical pages into MMU page tables */
- for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
+ for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
_mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
- phys_address | permission_bits);
+ mali_phys | permission_bits);
}
}
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
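+/* Walks the two-level page table covering fault_addr and prints whether the
+ * corresponding PDE and PTE are present (effective in DEBUG builds only). */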
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
{
- return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+#if defined(DEBUG)
+ u32 pde_index, pte_index;
+ u32 pde, pte;
+
+ pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+ pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+ pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ pde_index * sizeof(u32));
+
+ if (pde & MALI_MMU_FLAGS_PRESENT) {
+ u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+ pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+ pte_index * sizeof(u32));
+
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+ "\t\tPTE: %08x, page %08x is %s\n",
+ fault_addr, pte_addr, pte,
+ MALI_MMU_ENTRY_ADDRESS(pte),
+ pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+ } else {
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+ fault_addr, pde));
+ }
+#else
+ MALI_IGNORE(pagedir);
+ MALI_IGNORE(fault_addr);
+#endif
}
/* For instrumented */
struct dump_info info = { 0, 0, 0, NULL };
struct mali_session_data *session_data;
	MALI_DEBUG_ASSERT_POINTER(args);
-	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-	session_data = (struct mali_session_data *)(args->ctx);
+	session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+	MALI_DEBUG_ASSERT_POINTER(session_data);
MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
struct mali_session_data *session_data;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
- MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
- session_data = (struct mali_session_data *)(args->ctx);
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
info.buffer_left = args->size;
- info.buffer = args->buffer;
+ info.buffer = (u32 *)(uintptr_t)args->buffer;
- args->register_writes = info.buffer;
+ args->register_writes = (uintptr_t)info.buffer;
MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
- args->page_table_dump = info.buffer;
+ args->page_table_dump = (uintptr_t)info.buffer;
MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
args->register_writes_size = info.register_writes_size;
_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
-
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits);
u32 mali_allocate_empty_page(mali_io_address *virtual);
-void mali_free_empty_page(u32 address, mali_io_address virtual);
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
- u32 *page_table, mali_io_address *page_table_mapping,
- u32 *data_page, mali_io_address *data_page_mapping);
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
- u32 *page_table, mali_io_address *page_table_mapping,
- u32 *data_page, mali_io_address *data_page_mapping);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
struct mali_page_directory *mali_mmu_pagedir_alloc(void);
void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
*/
u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+/** @brief Print fmt into print_ctx.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param print_ctx a pointer to the result file buffer
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...);
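+
+/* Minimal usage sketch (hypothetical debugfs "show" handler; the handler
+ * name is illustrative, not part of this interface). _mali_osk_print_ctx is
+ * typedef'd to struct seq_file, so a show callback's seq_file can be passed
+ * directly as the print context:
+ *
+ *   static int dump_show(struct seq_file *s, void *private)
+ *   {
+ *       _mali_osk_ctxprintf(s, "state: %u\n", 0u);
+ *       return 0;
+ *   }
+ */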
+
/** @brief Abnormal process abort.
*
* Terminates the caller-process if this function is called.
/** @} */ /* end group uddapi */
+/** @brief Mali print context type, backed by a Linux seq_file.
+ */
+typedef struct seq_file _mali_osk_print_ctx;
+
#ifdef __cplusplus
}
#endif
_mali_osk_atomic_term(&pp_counter_per_sub_job_count);
}
-struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+ _mali_uk_pp_start_job_s __user *uargs, u32 id)
{
struct mali_pp_job *job;
u32 perf_counter_flag;
job->num_memory_cookies = job->uargs.num_memory_cookies;
if (job->num_memory_cookies > 0) {
u32 size;
+ u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
goto fail;
}
- size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+ size = sizeof(*memory_cookies) * job->num_memory_cookies;
job->memory_cookies = _mali_osk_malloc(size);
if (NULL == job->memory_cookies) {
goto fail;
}
- if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
+ if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
goto fail;
}
return (NULL == job) ? 0 : job->cache_order;
}
-MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
{
return job->uargs.user_job_ptr;
}
#endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
static void mali_pp_scheduler_job_queued(void);
-static void mali_pp_scheduler_job_completed(void);
+static void mali_pp_scheduler_job_completed(mali_bool job_started);
/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
#endif
}
-static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job)
+static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job, mali_bool job_started)
{
/* This job object should not be on any lists. */
MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
}
#endif
- mali_pp_scheduler_job_completed();
+ mali_pp_scheduler_job_completed(job_started);
}
void mali_pp_scheduler_schedule(void)
if (job_is_done) {
/* Return job to user and delete it. */
- mali_pp_scheduler_finalize_job(job);
+ mali_pp_scheduler_finalize_job(job, MALI_TRUE);
}
/* A GP job might be queued by tracker release above,
if (job_is_done) {
/* Return job to user and delete it. */
- mali_pp_scheduler_finalize_job(job);
+ mali_pp_scheduler_finalize_job(job, MALI_TRUE);
}
}
return _MALI_OSK_ERR_NOMEM;
}
- timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
+ timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
point = mali_pp_scheduler_submit_job(session, job);
job = NULL;
struct mali_gp_job *gp_job;
u32 __user *timeline_point_ptr = NULL;
mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
MALI_DEBUG_ASSERT_POINTER(ctx);
MALI_DEBUG_ASSERT_POINTER(uargs);
return _MALI_OSK_ERR_NOMEM;
}
- pp_job = mali_pp_job_create(session, kargs.pp_args, mali_scheduler_get_new_id());
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args, mali_scheduler_get_new_id());
if (NULL == pp_job) {
MALI_PRINT_ERROR(("Failed to create PP job.\n"));
return _MALI_OSK_ERR_NOMEM;
}
- gp_job = mali_gp_job_create(session, kargs.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
+ gp_job = mali_gp_job_create(session, gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
if (NULL == gp_job) {
MALI_PRINT_ERROR(("Failed to create GP job.\n"));
mali_pp_job_delete(pp_job);
return _MALI_OSK_ERR_NOMEM;
}
- timeline_point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;
+ timeline_point_ptr = (u32 __user *)(uintptr_t)pp_job->uargs.timeline_point_ptr;
/* Submit GP job. */
mali_gp_scheduler_submit_job(session, gp_job);
_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
args->number_of_total_cores = num_cores;
args->number_of_enabled_cores = enabled_cores;
+
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
args->version = pp_version;
+
return _MALI_OSK_ERR_OK;
}
struct mali_pp_job *tmp;
u32 fb_lookup_id;
	MALI_DEBUG_ASSERT_POINTER(args);
-	MALI_DEBUG_ASSERT_POINTER(args->ctx);
-	session = (struct mali_session_data *)args->ctx;
+	session = (struct mali_session_data *)(uintptr_t)args->ctx;
+	MALI_DEBUG_ASSERT_POINTER(session);
fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
_MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
mali_timeline_tracker_release(&job->tracker);
mali_pp_job_delete(job);
- mali_pp_scheduler_job_completed();
+ mali_pp_scheduler_job_completed(MALI_TRUE);
}
/* Abort any running jobs from the session. */
static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
{
- MALI_DEBUG_PRINT(3, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
_mali_osk_pm_dev_ref_add_no_power_on();
_mali_osk_pm_dev_barrier();
static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
{
- MALI_DEBUG_PRINT(3, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
mali_pp_scheduler_suspend();
}
}
-static void mali_pp_scheduler_job_completed(void)
+static void mali_pp_scheduler_job_completed(mali_bool job_started)
{
/* Release the PM reference we got in the mali_pp_scheduler_job_queued() function */
_mali_osk_pm_dev_ref_dec();
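+	/* Only account utilization for jobs that actually started on a core;
+	 * jobs aborted before starting never contributed any PP load. */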
- if (mali_utilization_enabled()) {
+ if (mali_utilization_enabled() && job_started) {
mali_utilization_pp_end();
}
}
mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
- mali_pp_scheduler_finalize_job(job);
+ mali_pp_scheduler_finalize_job(job, MALI_FALSE);
return MALI_SCHEDULER_MASK_EMPTY;
}
struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
{
- u32 i;
struct mali_soft_job_system *system;
- struct mali_soft_job *job;
MALI_DEBUG_ASSERT_POINTER(session);
return NULL;
}
system->lock_owner = 0;
+ system->last_job_id = 0;
- _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
- for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
- job = &(system->jobs[i]);
- _mali_osk_list_add(&(job->system_list), &(system->jobs_free));
- job->system = system;
- job->state = MALI_SOFT_JOB_STATE_FREE;
- job->id = i;
- }
-
return system;
}
MALI_DEBUG_ASSERT_POINTER(system);
/* All jobs should be free at this point. */
- MALI_DEBUG_CODE({
- u32 i;
- struct mali_soft_job *job;
-
- for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i)
- {
- job = &(system->jobs[i]);
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
- }
- });
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
if (NULL != system) {
if (NULL != system->lock) {
}
}
-static struct mali_soft_job *mali_soft_job_system_alloc_job(struct mali_soft_job_system *system)
-{
- struct mali_soft_job *job;
-
- MALI_DEBUG_ASSERT_POINTER(system);
- MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
-
- if (_mali_osk_list_empty(&(system->jobs_free))) {
- /* No jobs available. */
- return NULL;
- }
-
- /* Grab first job and move it to the used list. */
- job = _MALI_OSK_LIST_ENTRY(system->jobs_free.next, struct mali_soft_job, system_list);
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
-
- _mali_osk_list_move(&(job->system_list), &(system->jobs_used));
- job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
-
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
- MALI_DEBUG_ASSERT(system == job->system);
-
- return job;
-}
-
static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
{
MALI_DEBUG_ASSERT_POINTER(job);
mali_soft_job_system_lock(job->system);
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE != job->state);
MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
MALI_DEBUG_ASSERT(system == job->system);
- job->state = MALI_SOFT_JOB_STATE_FREE;
- _mali_osk_list_move(&(job->system_list), &(system->jobs_free));
+ _mali_osk_list_del(&(job->system_list));
mali_soft_job_system_unlock(job->system);
+
+ _mali_osk_free(job);
}
MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
{
+ struct mali_soft_job *job, *tmp;
+
MALI_DEBUG_ASSERT_POINTER(system);
MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
- if (job_id < MALI_MAX_NUM_SOFT_JOBS) {
- return &system->jobs[job_id];
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ if (job->id == job_id)
+ return job;
}
return NULL;
}
}
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job)
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
{
struct mali_soft_job *job;
_mali_osk_notification_t *notification = NULL;
MALI_DEBUG_ASSERT_POINTER(system);
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_TYPE_USER_SIGNALED >= type);
+ MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+ (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
- if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
- notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
- if (unlikely(NULL == notification)) {
- MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
- return NULL;
- }
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+ if (unlikely(NULL == notification)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+ return NULL;
}
- mali_soft_job_system_lock(system);
-
- job = mali_soft_job_system_alloc_job(system);
- if (NULL == job) {
- mali_soft_job_system_unlock(system);
- MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate job"));
- _mali_osk_notification_delete(notification);
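+	/* Soft jobs are now allocated dynamically instead of from the fixed
+	 * MALI_MAX_NUM_SOFT_JOBS pool, so the number of concurrently queued
+	 * soft jobs is no longer artificially capped. */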
+	job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+	if (unlikely(NULL == job)) {
+		MALI_DEBUG_PRINT(2, ("Mali Soft Job: failed to allocate job\n"));
+		_mali_osk_notification_delete(notification);
+		return NULL;
+	}
+ mali_soft_job_system_lock(system);
+
+ job->system = system;
+ job->id = system->last_job_id++;
+ job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+ _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
job->type = type;
job->user_job = user_job;
job->activated = MALI_FALSE;
- if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
- job->activated_notification = notification;
- }
+ job->activated_notification = notification;
_mali_osk_atomic_init(&job->refcount, 1);
job = mali_soft_job_system_lookup_job(system, job_id);
- if (NULL == job || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+ if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+ || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
mali_soft_job_system_unlock(system);
MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
/* Wake up sleeping signaler. */
job->activated = MALI_TRUE;
- _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
- mali_soft_job_system_unlock(job->system);
+	/* If the job is self-signaled, release the tracker, free the soft job, and schedule immediately */
+ if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+ } else {
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+ }
}
mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
void mali_soft_job_system_abort(struct mali_soft_job_system *system)
{
- u32 i;
struct mali_soft_job *job, *tmp;
_MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
mali_soft_job_system_lock(system);
- for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
- job = &(system->jobs[i]);
-
- MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state ||
- MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
* Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
* they are signaled by user-space (@ref mali_soft_job_system_signaled_job) or if they are timed out
* by the Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release their job resources
+ * automatically in the kernel when the job is activated.
*/
typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
MALI_SOFT_JOB_TYPE_USER_SIGNALED,
} mali_soft_job_type;
/**
* Soft job state.
*
- * All soft jobs in a soft job system will initially be in state MALI_SOFT_JOB_STATE_FREE. On @ref
- * mali_soft_job_system_start_job a job will first be allocated. A job in state
- * MALI_SOFT_JOB_STATE_FREE will be picked and the state changed to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * On @ref mali_soft_job_system_start_job a job is first allocated, and its state is set to MALI_SOFT_JOB_STATE_ALLOCATED.
* Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
*
* For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
* state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
* MALI_SOFT_JOB_STATE_STARTED.
*
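+	 * State transitions, in summary:
+	 *   ALLOCATED -> STARTED               (job added to the Timeline system)
+	 *   STARTED   -> SIGNALED              (signaled from user space, or at
+	 *                                       activation for SELF_SIGNALED jobs)
+	 *   STARTED   -> TIMED_OUT -> SIGNALED (timed out by the Timeline system)
+	 *
+	 * A soft job is freed once its reference count reaches zero.
+	 *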
- * When a soft job's reference count reaches zero, it will be freed and the state returns to
- * MALI_SOFT_JOB_STATE_FREE.
*/
typedef enum mali_soft_job_state {
- MALI_SOFT_JOB_STATE_FREE,
MALI_SOFT_JOB_STATE_ALLOCATED,
MALI_SOFT_JOB_STATE_STARTED,
MALI_SOFT_JOB_STATE_SIGNALED,
#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
-/* Maximum number of soft jobs per soft system. */
-#define MALI_MAX_NUM_SOFT_JOBS 20
-
/**
* Soft job struct.
*
*/
typedef struct mali_soft_job {
mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
- u32 user_job; /**< Identifier for soft job in user space. */
+ u64 user_job; /**< Identifier for soft job in user space. */
_mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
*/
typedef struct mali_soft_job_system {
struct mali_session_data *session; /**< The session this soft job system belongs to. */
-
- struct mali_soft_job jobs[MALI_MAX_NUM_SOFT_JOBS]; /**< Array of all soft jobs in this system. */
- _MALI_OSK_LIST_HEAD(jobs_free); /**< List of all free soft jobs. */
_MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
_mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+	u32 last_job_id; /**< Counter used to assign soft job ids; protected by the system lock. */
} mali_soft_job_system;
/**
* @param user_job Identifier for soft job in user space.
* @return New soft job if successful, NULL if not.
*/
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job);
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
/**
* Destroy soft job.
hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
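+	/* Iterate the local, detached list: hlist_move_list() above has
+	 * emptied mali_timeline_sync_fence_to_free_list. */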
- hlist_for_each_entry_safe(o, pos, tmp, &mali_timeline_sync_fence_to_free_list, list) {
+ hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
sync_fence_put(o->fence);
kfree(o);
}
MALI_DEBUG_ASSERT_POINTER(timeline->system);
system = timeline->system;
- if (MALI_TIMELINE_MAX > id) {
- return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+	if (MALI_TIMELINE_MAX > id) {
+		if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+ } else {
+ return MALI_FALSE;
+ }
} else {
MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
return MALI_FALSE;
{
switch (id) {
case MALI_TIMELINE_GP:
- return " GP";
+ return "GP";
case MALI_TIMELINE_PP:
- return " PP";
+ return "PP";
case MALI_TIMELINE_SOFT:
return "SOFT";
default:
{
switch (type) {
case MALI_TIMELINE_TRACKER_GP:
- return " GP";
+ return "GP";
case MALI_TIMELINE_TRACKER_PP:
- return " PP";
+ return "PP";
case MALI_TIMELINE_TRACKER_SOFT:
return "SOFT";
case MALI_TIMELINE_TRACKER_WAIT:
return MALI_TIMELINE_TS_FINISH;
}
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker)
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
{
const char *tracker_state = "IWAF";
+ char state_char = 'I';
+ char tracker_type[32] = {0};
MALI_DEBUG_ASSERT_POINTER(tracker);
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
if (0 != tracker->trigger_ref_count) {
- MALI_PRINTF(("TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u,%d] (0x%08X)\n",
- timeline_tracker_type_to_string(tracker->type), tracker->point,
- *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
- tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
- tracker->fence.sync_fd, tracker->job));
+#if defined(CONFIG_SYNC)
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u, fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
} else {
- MALI_PRINTF(("TL: %s %u %c (0x%08X)\n",
- timeline_tracker_type_to_string(tracker->type), tracker->point,
- *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
- tracker->job));
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
}
+#else
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
+ tracker->job);
+ } else {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->job);
+ }
+#endif
}
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline)
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
{
struct mali_timeline_tracker *tracker = NULL;
- int i_max = 30;
MALI_DEBUG_ASSERT_POINTER(timeline);
tracker = timeline->tracker_tail;
- while (NULL != tracker && 0 < --i_max) {
- mali_timeline_debug_print_tracker(tracker);
+ while (NULL != tracker) {
+ mali_timeline_debug_print_tracker(tracker, print_ctx);
tracker = tracker->timeline_next;
}
-
- if (0 == i_max) {
- MALI_PRINTF(("TL: Too many trackers in list to print\n"));
- }
}
-void mali_timeline_debug_print_system(struct mali_timeline_system *system)
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
{
int i;
int num_printed = 0;
+ u32 tid = _mali_osk_get_tid();
MALI_DEBUG_ASSERT_POINTER(system);
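+	/* Hold the timeline system lock while walking and printing the
+	 * timelines so trackers cannot be released mid-iteration. */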
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
/* Print all timelines */
for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
struct mali_timeline *timeline = system->timelines[i];
if (NULL == timeline->tracker_head) continue;
- MALI_PRINTF(("TL: Timeline %s:\n",
- timeline_id_to_string((enum mali_timeline_id)i)));
- mali_timeline_debug_print_timeline(timeline);
+ _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+ timeline_id_to_string((enum mali_timeline_id)i));
+
+ mali_timeline_debug_print_timeline(timeline, print_ctx);
num_printed++;
}
if (0 == num_printed) {
- MALI_PRINTF(("TL: All timelines empty\n"));
+ _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
}
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
}
#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
*
* @param tracker Tracker to print.
*/
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
/**
* Print debug information about timeline.
*
* @param timeline Timeline to print.
*/
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline);
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
/**
* Print debug information about timeline system.
*
* @param system Timeline system to print.
*/
-void mali_timeline_debug_print_system(struct mali_timeline_system *system);
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
*
* @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
* @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
*/
_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
/** @brief Get the user space settings applicable for calling process.
*
* @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args);
#endif /* CONFIG_MALI400_UMP */
-/** @brief Determine virtual-to-physical mapping of a contiguous memory range
- * (optional)
- *
- * This allows the user-side to do a virtual-to-physical address translation.
- * In conjunction with _mali_uku_map_external_mem, this can be used to do
- * direct rendering.
- *
- * This function will only succeed on a virtual range that is mapped into the
- * current process, and that is contigious.
- *
- * If va is not page-aligned, then it is rounded down to the next page
- * boundary. The remainer is added to size, such that ((u32)va)+size before
- * rounding is equal to ((u32)va)+size after rounding. The rounded modified
- * va and size will be written out into args on success.
- *
- * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
- * then size will be rounded up to the next multiple of PAGE_SIZE before
- * translation occurs. The rounded up size will be written out into args on
- * success.
- *
- * On most OSs, virtual-to-physical address translation is a priveledged
- * function. Therefore, the implementer must validate the range supplied, to
- * ensure they are not providing arbitrary virtual-to-physical address
- * translations. While it is unlikely such a mechanism could be used to
- * compromise the security of a system on its own, it is possible it could be
- * combined with another small security risk to cause a much larger security
- * risk.
- *
- * @note This is an optional part of the interface, and is only used by certain
- * implementations of libEGL. If the platform layer in your libEGL
- * implementation does not require Virtual-to-Physical address translation,
- * then this function need not be implemented. A stub implementation should not
- * be required either, as it would only be removed by the compiler's dead code
- * elimination.
- *
- * @note if implemented, this function is entirely platform-dependant, and does
- * not exist in common code.
- *
- * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_va_to_mali_pa(_mali_uk_va_to_mali_pa_s *args);
-
/** @} */ /* end group _mali_uk_memory */
/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
* @{ */
-/** @brief Start recording profiling events.
- *
- * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
-
/** @brief Add event to profiling buffer.
*
* @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
*/
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
-/** @brief Stop recording profiling events.
- *
- * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
-
-/** @brief Retrieve a recorded profiling event.
- *
- * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
-
-/** @brief Clear recorded profiling events.
- *
- * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
-
/** @brief Return the total memory usage
*
* @param args see _mali_uk_profiling_memory_usage_get_s in "mali_utgard_uk_types.h"
#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
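+
+/* The last argument of _IOR()/_IOW()/_IOWR() has its sizeof() encoded into
+ * the resulting ioctl number. The old "struct *" forms encoded the size of a
+ * pointer, which differs between 32-bit and 64-bit user space; passing the
+ * struct type itself keeps the ioctl numbers identical for both. */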
-#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
-#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
-#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
-#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
-#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s *)
-#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s *)
-#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s *)
-#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s *)
-#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s *)
-#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2 _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
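+/* GET_API_VERSION and GET_API_VERSION_V2 share the command number
+ * _MALI_UK_GET_API_VERSION but remain distinct ioctls, because _IOWR() also
+ * encodes their (different) argument sizes. */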
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
-#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
-#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
-#define MALI_IOC_MEM_ATTACH_DMA_BUF _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
-#define MALI_IOC_MEM_RELEASE_DMA_BUF _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
-#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
-#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
-#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
-#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
-#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
-#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s)
+#define MALI_IOC_MEM_ATTACH_DMA_BUF _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s)
+#define MALI_IOC_MEM_RELEASE_DMA_BUF _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
-#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
-#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s *)
-#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
-#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
-#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
-#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
-#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
-#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
-#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s)
-#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
-#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
-#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
-#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
-#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
-#define MALI_IOC_PROFILING_GET_CONFIG _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
-#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
-#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
-
-/* Deprecated ioctls */
-#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, void *)
-#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, void *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
#ifdef __cplusplus
}
_MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
_MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
_MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
- _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
_MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
} _mali_uk_subsystem_t;
_MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
_MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
- _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
- _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
_MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
_MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
_MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
_MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
_MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
_MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
- _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
_MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */
/** Common functions for each core */
/** Profiling functions */
- _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
- _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
- _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
- _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
- _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
- _MALI_UK_PROFILING_GET_CONFIG, /**< __mali_uku_profiling_get_config() */
+ _MALI_UK_PROFILING_ADD_EVENT = 0, /**< __mali_uku_profiling_add_event() */
_MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
_MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */
/** VSYNC reporting functions */
_MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
-
} _mali_uk_functions;
-/** @brief Get the size necessary for system info
- *
- * @see _mali_ukk_get_system_info_size()
- */
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
-} _mali_uk_get_system_info_size_s;
-
-
/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
* @{ */
*/
typedef u32 _mali_core_version;
-/**
- * Enum values for the different modes the driver can be put in.
- * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
- * Job completion is reported using the _mali_ukk_wait_for_notification call.
- * The driver blocks this io command until a job has completed or failed or a timeout occurs.
- *
- * The 'raw' mode is reserved for future expansion.
- */
-typedef enum _mali_driver_mode {
- _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
- _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
-} _mali_driver_mode;
-
-/** @brief List of possible cores
- *
- * add new entries to the end of this enum */
-typedef enum _mali_core_type {
- _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
- _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
- _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
- _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
- /* insert new core here, do NOT alter the existing values */
-} _mali_core_type;
-
-
-/** @brief Capabilities of Memory Banks
- *
- * These may be used to restrict memory banks for certain uses. They may be
- * used when access is not possible (e.g. Bus does not support access to it)
- * or when access is possible but not desired (e.g. Access is slow).
- *
- * In the case of 'possible but not desired', there is no way of specifying
- * the flags as an optimization hint, so that the memory could be used as a
- * last resort.
- *
- * @see _mali_mem_info
- */
-typedef enum _mali_bus_usage {
-
- _MALI_PP_READABLE = (1 << 0), /** Readable by the Fragment Processor */
- _MALI_PP_WRITEABLE = (1 << 1), /** Writeable by the Fragment Processor */
- _MALI_GP_READABLE = (1 << 2), /** Readable by the Vertex Processor */
- _MALI_GP_WRITEABLE = (1 << 3), /** Writeable by the Vertex Processor */
- _MALI_CPU_READABLE = (1 << 4), /** Readable by the CPU */
- _MALI_CPU_WRITEABLE = (1 << 5), /** Writeable by the CPU */
- _MALI_GP_L2_ALLOC = (1 << 6), /** GP allocate mali L2 cache lines*/
- _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /** Readable by the MMU (including all cores behind it) */
- _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
-} _mali_bus_usage;
-
-typedef enum mali_memory_cache_settings {
- MALI_CACHE_STANDARD = 0,
- MALI_CACHE_GP_READ_ALLOCATE = 1,
-} mali_memory_cache_settings ;
-
-
-/** @brief Information about the Mali Memory system
- *
- * Information is stored in a linked list, which is stored entirely in the
- * buffer pointed to by the system_info member of the
- * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
- *
- * Each element of the linked list describes a single Mali Memory bank.
- * Each allocation can only come from one bank, and will not cross multiple
- * banks.
- *
- * On Mali-MMU systems, there is only one bank, which describes the maximum
- * possible address range that could be allocated (which may be much less than
- * the available physical memory)
- *
- * The flags member describes the capabilities of the memory. It is an error
- * to attempt to build a job for a particular core (PP or GP) when the memory
- * regions used do not have the capabilities for supporting that core. This
- * would result in a job abort from the Device Driver.
- *
- * For example, it is correct to build a PP job where read-only data structures
- * are taken from a memory with _MALI_PP_READABLE set and
- * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
- * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
- * where _MALI_PP_WRITEABLE is clear.
- */
-typedef struct _mali_mem_info {
- u32 size; /**< Size of the memory bank in bytes */
- _mali_bus_usage flags; /**< Capabilitiy flags of the memory */
- u32 maximum_order_supported; /**< log2 supported size */
- u32 identifier; /* mali_memory_cache_settings cache_settings; */
- struct _mali_mem_info *next; /**< Next List Link */
-} _mali_mem_info;
-
/** @} */ /* end group _mali_uk_core */
} _maligp_job_suspended_response_code;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
_maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
* @{ */
-/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
-typedef enum {
- _MALI_UK_START_JOB_STARTED, /**< Job started */
- _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
-} _mali_uk_start_job_status;
-
/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
-
typedef enum {
_MALI_UK_JOB_STATUS_END_SUCCESS = 1 << (16 + 0),
_MALI_UK_JOB_STATUS_END_OOM = 1 << (16 + 1),
*
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
u32 priority; /**< [in] job priority. A lower number means higher priority */
u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
u32 frame_builder_id; /**< [in] id of the originating frame builder */
u32 flush_id; /**< [in] flush id within the originating frame builder */
_mali_uk_fence_t fence; /**< [in] fence this job must wait on */
- u32 *timeline_point_ptr; /**< [in,out] pointer to location where point on gp timeline for this job will be written */
+ u64 timeline_point_ptr; /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
} _mali_uk_gp_start_job_s;
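
/*
 * Illustrative sketch (hypothetical variable names): how a user pointer
 * travels through these u64 fields so that 32-bit and 64-bit user space share
 * one layout. User space widens the pointer, and the kernel narrows it back
 * through (uintptr_t) before dereferencing, as the wrappers later in this
 * series do.
 *
 *   u32 point;                                     // user-side result slot
 *   args.timeline_point_ptr = (u64)(uintptr_t)&point;
 *
 *   // kernel side, after copying the argument struct in:
 *   u32 __user *ptr = (u32 __user *)(uintptr_t)args->timeline_point_ptr;
 *   if (0 != put_user(point_value, ptr))
 *           return _MALI_OSK_ERR_FAULT;
 */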
#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
/** @} */ /* end group _mali_uk_gpstartjob_s */
typedef struct {
- u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
_mali_uk_job_status status; /**< [out] status of finished job */
u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
} _mali_uk_gp_job_finished_s;
typedef struct {
- u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
} _mali_uk_gp_job_suspended_s;
*
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space */
u32 priority; /**< [in] job priority. A lower number means higher priority */
u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
u32 tilesy; /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
u32 heatmap_mem; /**< [in] memory address to store counter values per tile (aka heatmap) */
u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */
- u32 *memory_cookies; /**< [in] memory cookies attached to job */
+ u64 memory_cookies; /**< [in] pointer to array of u32 memory cookies attached to job */
_mali_uk_fence_t fence; /**< [in] fence this job must wait on */
- u32 *timeline_point_ptr; /**< [in,out] pointer to location where point on pp timeline for this job will be written */
+ u64 timeline_point_ptr; /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
} _mali_uk_pp_start_job_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- _mali_uk_gp_start_job_s *gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
- _mali_uk_pp_start_job_s *pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+ u64 pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
} _mali_uk_pp_and_gp_start_job_s;
/** @} */ /* end group _mali_uk_ppstartjob_s */
typedef struct {
- u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
_mali_uk_job_status status; /**< [out] status of finished job */
u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
} _mali_uk_pp_job_wbx_flag;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
u32 wb0_memory;
u32 wb1_memory;
* @{ */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 type; /**< [in] type of soft job */
- u32 user_job; /**< [in] identifier for the job in user space */
- u32 *job_id_ptr; /**< [in,out] pointer to location where job id will be written */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job; /**< [in] identifier for the job in user space */
+ u64 job_id_ptr; /**< [in,out] pointer to location of u32 where job id will be written */
_mali_uk_fence_t fence; /**< [in] fence this job must wait on */
u32 point; /**< [out] point on soft timeline for this job */
+ u32 type; /**< [in] type of soft job */
} _mali_uk_soft_job_start_s;
typedef struct {
- u32 user_job; /**< [out] identifier for the job in user space */
+ u64 user_job; /**< [out] identifier for the job in user space */
} _mali_uk_soft_job_activated_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 job_id; /**< [in] id for soft job */
} _mali_uk_soft_job_signal_s;
* when the polygon list builder unit has run out of memory.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_notification_type type; /**< [out] Type of notification available */
union {
_mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
* This is used to send a quit message to the callback thread.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_notification_type type; /**< [in] Type of notification to post */
} _mali_uk_post_notification_s;
* The 16bit integer is stored twice in a 32bit integer
* For example, for version 1 the value would be 0x00010001
*/
-#define _MALI_API_VERSION 401
+#define _MALI_API_VERSION 600
#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
/**
* of the interface may be backwards compatible.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
} _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
+
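+/* Illustrative only (not part of this interface): a minimal user-side
+ * handshake sketch for the v2 call, assuming a hypothetical file descriptor
+ * "fd" opened on the Mali device node.
+ *
+ *   _mali_uk_get_api_version_v2_s args = { 0 };
+ *   args.version = _MALI_UK_API_VERSION;
+ *   if (0 == ioctl(fd, MALI_IOC_GET_API_VERSION_V2, &args) && args.compatible) {
+ *       // args.version now holds the kernel-side API version
+ *   }
+ */
+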
/** @} */ /* end group _mali_uk_getapiversion_s */
/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
*
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
} _mali_uk_get_user_settings_s;
/** @brief struct to hold the value of a particular setting from the user space within a given context
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_user_setting_t setting; /**< [in] setting to get */
u32 value; /**< [out] value of setting */
} _mali_uk_get_user_setting_s;
/** @brief Arguments for _mali_ukk_request_high_priority() */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
} _mali_uk_request_high_priority_s;
/** @} */ /* end group _mali_uk_core */
#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 phys_addr; /**< [in] physical address */
u32 size; /**< [in] size */
u32 mali_address; /**< [in] mali address to map the physical memory to */
} _mali_uk_map_external_mem_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
} _mali_uk_unmap_external_mem_s;
/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 mem_fd; /**< [in] Memory descriptor */
u32 size; /**< [in] size */
u32 mali_address; /**< [in] mali address to map the physical memory to */
} _mali_uk_attach_dma_buf_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 mem_fd; /**< [in] Memory descriptor */
u32 size; /**< [out] size */
} _mali_uk_dma_buf_get_size_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 cookie; /**< [in] identifier for mapped memory object in kernel space */
} _mali_uk_release_dma_buf_s;
/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 secure_id; /**< [in] secure id */
u32 size; /**< [in] size */
u32 mali_address; /**< [in] mali address to map the physical memory to */
} _mali_uk_attach_ump_mem_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
} _mali_uk_release_ump_mem_s;
-/** @brief Arguments for _mali_ukk_va_to_mali_pa()
- *
- * if size is zero or not a multiple of the system's page size, it will be
- * rounded up to the next multiple of the page size. This will occur before
- * any other use of the size parameter.
- *
- * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
- * boundary.
- *
- * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
- * contiguity.
- *
- * The implementor will check that the entire physical range is allowed to be mapped
- * into user-space.
- *
- * Failure will occur if either of the above are not satisfied.
- *
- * Otherwise, the physical base address of the range is returned through pa,
- * va is updated to be page aligned, and size is updated to be a non-zero
- * multiple of the system's pagesize.
- */
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- void *va; /**< [in,out] Virtual address of the start of the range */
- u32 pa; /**< [out] Physical base address of the range */
- u32 size; /**< [in,out] Size of the range, in bytes. */
-} _mali_uk_va_to_mali_pa_s;
-
/**
* @brief Arguments for _mali_uk[uk]_mem_write_safe()
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- const void *src; /**< [in] Pointer to source data */
- void *dest; /**< [in] Destination Mali buffer */
- u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 src; /**< [in] Pointer to source data */
+ u64 dest; /**< [in] Destination Mali buffer */
+ u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
} _mali_uk_mem_write_safe_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 size; /**< [out] size of MMU page table information (registers + page tables) */
} _mali_uk_query_mmu_page_table_dump_size_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 size; /**< [in] size of buffer to receive mmu page table information */
- void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u64 buffer; /**< [in,out] buffer to receive mmu page table information */
u32 register_writes_size; /**< [out] size of MMU register dump */
- u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u64 register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
u32 page_table_dump_size; /**< [out] size of MMU page table dump */
- u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+ u64 page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
} _mali_uk_dump_mmu_page_table_s;
/** @} */ /* end group _mali_uk_memory */
* will contain the number of Fragment Processor cores in the system.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */
u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */
} _mali_uk_get_pp_number_of_cores_s;
* the version that all Fragment Processor cores are compatible with.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+ u32 padding;
} _mali_uk_get_pp_core_version_s;
/** @} */ /* end group _mali_uk_pp */
* will contain the number of Vertex Processor cores in the system.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
} _mali_uk_get_gp_number_of_cores_s;
* the version that all Vertex Processor cores are compatible with.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
} _mali_uk_get_gp_core_version_s;
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
-} _mali_uk_profiling_start_s;
+/** @} */ /* end group _mali_uk_gp */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
u32 data[5]; /**< [in] event specific data */
} _mali_uk_profiling_add_event_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 count; /**< [out] The number of events sampled */
-} _mali_uk_profiling_stop_s;
-
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 index; /**< [in] which index to get (starting at zero) */
- u64 timestamp; /**< [out] timestamp of event */
- u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
- u32 data[5]; /**< [out] event specific data */
-} _mali_uk_profiling_get_event_s;
-
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
-} _mali_uk_profiling_clear_s;
-
-typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 memory_usage; /**< [out] total memory usage */
} _mali_uk_profiling_memory_usage_get_s;
-/** @} */ /* end group _mali_uk_gp */
-
/** @addtogroup _mali_uk_memory U/K Memory
* @{ */
* implementation of the U/K interface. Its value must be zero.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
void *mapping; /**< [out] Returns user-space virtual address for the mapping */
u32 size; /**< [in] Size of the requested mapping */
u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
- void *uku_private; /**< [in] User-side Private word used by U/K interface */
- void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
- mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficency */
} _mali_uk_mem_mmap_s;
/** @brief Arguments to _mali_ukk_mem_munmap()
* originally obtained range, or to unmap more than was originally obtained.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
void *mapping; /**< [in] The mapping returned from mmap call */
u32 size; /**< [in] The size passed to mmap call */
u32 cookie; /**< [in] Cookie from mmap call */
*
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_vsync_event event; /**< [in] VSYNCH event type */
} _mali_uk_vsync_event_report_s;
* Values recorded for each of the software counters during a single renderpass.
*/
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
- u32 *counters; /**< [in] The array of counter values */
- u32 num_counters; /**< [in] The number of elements in counters array */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 counters; /**< [in] The array of u32 counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
} _mali_uk_sw_counters_report_s;
/** @} */ /* end group _mali_uk_sw_counters_report */
* @{ */
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
u32 timeline; /**< [in] timeline id */
u32 point; /**< [out] latest point on timeline */
} _mali_uk_timeline_get_latest_point_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_fence_t fence; /**< [in] fence */
u32 timeout; /**< [in] timeout (0 for no wait, -1 for blocking) */
u32 status; /**< [out] status of fence (1 if signaled, 0 if timeout) */
} _mali_uk_timeline_wait_s;
typedef struct {
- void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_fence_t fence; /**< [in] mali fence to create linux sync fence from */
s32 sync_fd; /**< [out] file descriptor for new linux sync fence */
} _mali_uk_timeline_create_sync_fence_s;
#include "mali_kernel_license.h"
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
+#include <linux/bug.h>
#include <linux/mali/mali_utgard.h>
#include "mali_kernel_common.h"
#include "mali_session.h"
#else
.ioctl = mali_ioctl,
#endif
+ .compat_ioctl = mali_ioctl,
.mmap = mali_mmap
};
switch (cmd) {
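	/* Every argument struct crossing this boundary must be a multiple of
	 * 8 bytes, so 32-bit and 64-bit user space see one layout (the same
	 * handler is wired up as .compat_ioctl above); the BUILD_BUG_ON()
	 * checks below enforce this at compile time. */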
case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
break;
+ case MALI_IOC_GET_API_VERSION_V2:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+ err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+ break;
+
case MALI_IOC_GET_API_VERSION:
err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
break;
case MALI_IOC_POST_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
break;
case MALI_IOC_GET_USER_SETTINGS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
break;
case MALI_IOC_REQUEST_HIGH_PRIORITY:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
break;
#if defined(CONFIG_MALI400_PROFILING)
- case MALI_IOC_PROFILING_START:
- err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
- break;
-
case MALI_IOC_PROFILING_ADD_EVENT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
break;
- case MALI_IOC_PROFILING_STOP:
- err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
- break;
-
- case MALI_IOC_PROFILING_GET_EVENT:
- err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
- break;
-
- case MALI_IOC_PROFILING_CLEAR:
- err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
- break;
-
- case MALI_IOC_PROFILING_GET_CONFIG:
- /* Deprecated: still compatible with get_user_settings */
- err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
- break;
-
case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
break;
case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
err = profiling_memory_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
break;
#else
- case MALI_IOC_PROFILING_START: /* FALL-THROUGH */
case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
- case MALI_IOC_PROFILING_STOP: /* FALL-THROUGH */
- case MALI_IOC_PROFILING_GET_EVENT: /* FALL-THROUGH */
- case MALI_IOC_PROFILING_CLEAR: /* FALL-THROUGH */
- case MALI_IOC_PROFILING_GET_CONFIG: /* FALL-THROUGH */
case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_MEMORY_USAGE_GET: /* FALL-THROUGH */
MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
err = -ENOTTY;
break;
#endif
case MALI_IOC_MEM_WRITE_SAFE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
break;
case MALI_IOC_MEM_MAP_EXT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_map_external_mem_s), sizeof(u64)));
err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
break;
case MALI_IOC_MEM_UNMAP_EXT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unmap_external_mem_s), sizeof(u64)));
err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
break;
case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
break;
case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
break;
#if defined(CONFIG_MALI400_UMP)
case MALI_IOC_MEM_ATTACH_UMP:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_attach_ump_mem_s), sizeof(u64)));
err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
break;
case MALI_IOC_MEM_RELEASE_UMP:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_release_ump_mem_s), sizeof(u64)));
err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
break;
#ifdef CONFIG_DMA_SHARED_BUFFER
case MALI_IOC_MEM_ATTACH_DMA_BUF:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_attach_dma_buf_s), sizeof(u64)));
err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
break;
case MALI_IOC_MEM_RELEASE_DMA_BUF:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_release_dma_buf_s), sizeof(u64)));
err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
break;
case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
break;
#else
#endif
case MALI_IOC_PP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
break;
case MALI_IOC_PP_AND_GP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
break;
case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
break;
case MALI_IOC_PP_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
break;
case MALI_IOC_PP_DISABLE_WB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
break;
case MALI_IOC_GP2_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
break;
case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
break;
case MALI_IOC_GP2_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
break;
case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
break;
case MALI_IOC_VSYNC_EVENT_REPORT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
break;
case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
break;
case MALI_IOC_TIMELINE_WAIT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
break;
case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
break;
case MALI_IOC_SOFT_JOB_START:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
break;
case MALI_IOC_SOFT_JOB_SIGNAL:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
break;
- case MALI_IOC_MEM_INIT: /* Fallthrough */
- case MALI_IOC_MEM_TERM: /* Fallthrough */
- MALI_DEBUG_PRINT(2, ("Deprecated ioctls called\n"));
- err = -ENOTTY;
- break;
-
- case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
- case MALI_IOC_MEM_FREE_BIG_BLOCK:
- MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
- err = -ENOTTY;
- break;
-
default:
MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
err = -ENOTTY;
#include "mali_gp_job.h"
#include "mali_pp_job.h"
#include "mali_pp_scheduler.h"
+#include "mali_session.h"
#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
- u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
- mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
- u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
char buf[64];
int r;
u32 val;
static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
- u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
- u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
- mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
- u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
char buf[64];
long val;
int ret;
struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
if (mali_user_settings_dir != NULL) {
- int i;
+ long i;
for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
- debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void *)i, &user_settings_fops);
+ debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+ 0600, mali_user_settings_dir, (void *)i,
+ &user_settings_fops);
}
}
.read = version_read,
};
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+ struct mali_session_data *session, *tmp;
+ u32 session_seq = 1;
+
+	seq_printf(s, "timeline system info:\n=================\n\n");
+
+	mali_session_lock();
+	MALI_SESSION_FOREACH(session, tmp, link) {
+		seq_printf(s, "session %u <%p> start:\n", session_seq, session);
+		mali_timeline_debug_print_system(session->timeline_system, s);
+		seq_printf(s, "session %u end\n\n\n", session_seq++);
+	}
+ }
+ mali_session_unlock();
+
+ return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = timeline_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+#endif
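+
+/* Usage note (illustrative): with DEBUG set, per-session timeline state can
+ * be read from <debugfs mount>/<mali_dev_name>/timeline_dump, the directory
+ * being created in mali_sysfs_register() below. */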
+
int mali_sysfs_register(const char *mali_dev_name)
{
mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
if (mali_gp_dir != NULL) {
u32 num_groups;
- int i;
+ long i;
num_groups = mali_group_get_glob_num_groups();
for (i = 0; i < num_groups; i++) {
mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
if (mali_pp_dir != NULL) {
u32 num_groups;
- int i;
+ long i;
debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
if (mali_profiling_dir != NULL) {
u32 max_sub_jobs;
- int i;
+ long i;
struct dentry *mali_profiling_gp_dir;
struct dentry *mali_profiling_pp_dir;
#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
_mali_osk_snprintf(buf, sizeof(buf), "%u", (u32)i);
mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
if (NULL != mali_profiling_pp_x_dir) {
- debugfs_create_file("counter_src0", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), &profiling_counter_src_fops);
- debugfs_create_file("counter_src1", 0600, mali_profiling_pp_x_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), &profiling_counter_src_fops);
+ debugfs_create_file("counter_src0",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+ &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+ &profiling_counter_src_fops);
}
}
debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
#endif
+#if defined(DEBUG)
+ debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
if (mali_sysfs_user_settings_register()) {
/* Failed to create the debugfs entries for the user settings DB. */
MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
* @param table_page GPU pointer to the allocated page
* @param mapping CPU pointer to the mapping of the allocated page
*/
-MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
{
return mali_mem_os_get_table_page(table_page, mapping);
}
*
* @param pa the GPU address of the page to release
*/
-MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
{
mali_mem_os_release_table_page(phys, virt);
}
return -EFAULT;
}
- MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
+	MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %llu\n", args.cookie));
_mali_osk_mutex_wait(session->memory_lock);
- descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
+ descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, (u32)args.cookie);
if (NULL != descriptor) {
MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));
mali_mem_descriptor_destroy(descriptor);
} else {
- MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
+		MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %llu used to release dma-buf\n", args.cookie));
ret = -EINVAL;
}
_mali_osk_errcode_t err;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
- session = (struct mali_session_data *)args->ctx;
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
/* check arguments */
MALI_DEBUG_PRINT(3,
("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
- (void *)args->phys_addr,
- (void *)(args->phys_addr + args->size - 1),
- (void *)args->mali_address)
- );
+ args->phys_addr, (args->phys_addr + args->size - 1),
+ args->mali_address));
/* Validate the mali physical range */
if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
- session = (struct mali_session_data *)args->ctx;
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
/* Allocate new pages, if needed. */
for (i = 0; i < remaining; i++) {
dma_addr_t dma_addr;
+ gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
+ int err;
- new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
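+		/* Mali bus addresses are 32-bit: plain highmem is only safe when
+		 * physical addresses cannot exceed 4GB (ARM without LPAE); on
+		 * wider systems, restrict allocations to the low 4GB via GFP_DMA32. */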
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+ flags |= GFP_HIGHUSER;
+#else
+ flags |= GFP_DMA32;
+#endif
+
+ new_page = alloc_page(flags);
if (unlikely(NULL == new_page)) {
/* Calculate the number of pages actually allocated, and free them. */
dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+ if (unlikely(err)) {
+			MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %d\n",
+						new_page, err));
+ __free_page(new_page);
+ descriptor->os_mem.count = (page_count - remaining) + i;
+ atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(descriptor);
+ return -EFAULT;
+ }
+
/* Store page phys addr */
SetPagePrivate(new_page);
set_page_private(new_page, dma_addr);
}
list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
- u32 phys = page_private(page);
- mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+ dma_addr_t phys = page_private(page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
virt += MALI_MMU_PAGE_SIZE;
}
#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
static struct {
struct {
- u32 phys;
+ mali_dma_addr phys;
mali_io_address mapping;
} page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
- u32 count;
+ size_t count;
spinlock_t lock;
} mali_mem_page_table_page_pool = {
.count = 0,
.lock = __SPIN_LOCK_UNLOCKED(pool_lock),
};
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
{
_mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+ dma_addr_t tmp_phys;
spin_lock(&mali_mem_page_table_page_pool.lock);
if (0 < mali_mem_page_table_page_pool.count) {
spin_unlock(&mali_mem_page_table_page_pool.lock);
if (_MALI_OSK_ERR_OK != ret) {
- *mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+ GFP_KERNEL, &dma_attrs_wc);
+#else
+ *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
if (NULL != *mapping) {
ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+ *phys = (mali_dma_addr)tmp_phys;
}
}
return ret;
}
-void mali_mem_os_release_table_page(u32 phys, void *virt)
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
{
spin_lock(&mali_mem_page_table_page_pool.lock);
if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
} else {
spin_unlock(&mali_mem_page_table_page_pool.lock);
- dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+ &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
}
}
*/
static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
{
- u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+ mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
u32 i;
/* After releasing the spinlock: free the pages we removed from the pool. */
for (i = 0; i < nr_to_free; i++) {
- dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
}
}
return _MALI_OSK_ERR_NOMEM;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
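+	/* Mark the attrs as write-combine once at init; dma_alloc_attrs() and
+	 * dma_free_attrs() above then use them when mapping page table pages. */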
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
register_shrinker(&mali_mem_os_allocator.shrinker);
return _MALI_OSK_ERR_OK;
*/
void mali_mem_os_release(mali_mem_allocation *descriptor);
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping);
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
-void mali_mem_os_release_table_page(u32 phys, void *virt);
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
_mali_osk_errcode_t mali_mem_os_init(void);
void mali_mem_os_term(void);
int md, ret;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
- session = (struct mali_session_data *)args->ctx;
- MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
/* check arguments */
/* NULL might be a valid Mali address */
struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(args);
- MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
- session = (struct mali_session_data *)args->ctx;
- MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void **)&descriptor)) {
MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
_mali_osk_write_mem_barrier();
}
-u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
{
#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
u32 retval = 0;
_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
{
+ void __user *src;
+ void __user *dst;
+ struct mali_session_data *session;
+
MALI_DEBUG_ASSERT_POINTER(args);
- if (NULL == args->ctx) {
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (NULL == session) {
return _MALI_OSK_ERR_INVALID_ARGS;
}
+ src = (void __user *)(uintptr_t)args->src;
+ dst = (void __user *)(uintptr_t)args->dest;
+
/* Return number of bytes actually copied */
- args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+ args->size = _mali_osk_mem_write_safe(dst, src, args->size);
return _MALI_OSK_ERR_OK;
}
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/module.h>
#include "mali_osk.h"
return res;
}
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...)
+{
+	va_list args;
+	char buf[512];
+
+	va_start(args, fmt);
+	vscnprintf(buf, sizeof(buf), fmt, args);
+	seq_printf(print_ctx, "%s", buf);
+	va_end(args);
+}
+
void _mali_osk_abort(void)
{
/* make a simple fault by dereferencing a NULL pointer */
/* Nothing to do */
}
-_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit)
-{
- /* Nothing to do */
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
-{
- /* Nothing to do */
- return _MALI_OSK_ERR_OK;
-}
-
-u32 _mali_osk_profiling_get_count(void)
-{
- return 0;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
-{
- /* Nothing to do */
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_osk_profiling_clear(void)
-{
- /* Nothing to do */
- return _MALI_OSK_ERR_OK;
-}
-
-mali_bool _mali_osk_profiling_is_recording(void)
-{
- return MALI_FALSE;
-}
-
-mali_bool _mali_osk_profiling_have_recording(void)
-{
- return MALI_FALSE;
-}
-
void _mali_osk_profiling_report_sw_counters(u32 *counters)
{
trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
*memory_usage = _mali_ukk_report_memory_usage();
}
-
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
-{
- return _mali_osk_profiling_start(&args->limit);
-}
-
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
/* Always add process and thread identificator in the first two data elements for events from user space */
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
-{
- return _mali_osk_profiling_stop(&args->count);
-}
-
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
{
- return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
-}
+ u32 *counters = (u32 *)(uintptr_t)args->counters;
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
-{
- return _mali_osk_profiling_clear();
-}
+ _mali_osk_profiling_report_sw_counters(counters);
-_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
-{
- _mali_osk_profiling_report_sw_counters(args->counters);
return _MALI_OSK_ERR_OK;
}
typedef struct dma_pool *mali_dma_pool;
+typedef u32 mali_dma_addr;
+
MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
{
- return dma_pool_create("mali-dma", &mali_platform_device->dev, size, alignment, boundary);
+ return dma_pool_create("mali-dma", &mali_platform_device->dev,
+ (size_t)size, (size_t)alignment, (size_t)boundary);
}
MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
dma_pool_destroy(pool);
}
-MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, u32 *phys_addr)
+MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, mali_dma_addr *phys_addr)
{
- return dma_pool_alloc(pool, GFP_KERNEL, phys_addr);
+ void *ret;
+ dma_addr_t phys;
+
+	ret = dma_pool_alloc(pool, GFP_KERNEL, &phys);
+	if (NULL == ret)
+		return NULL; /* "phys" is not written on allocation failure */
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+	/* Verify that the "physical" address is 32-bit and
+	 * usable for Mali, when on a system with bus addresses
+	 * wider than 32-bit. */
+	BUG_ON(0 != (phys >> 32));
+#endif
+	*phys_addr = phys;
+
+ return ret;
}
-MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, u32 phys_addr)
+MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, mali_dma_addr phys_addr)
{
- dma_pool_free(pool, virt_addr, phys_addr);
+ dma_pool_free(pool, virt_addr, (dma_addr_t)phys_addr);
}
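
/*
 * Illustrative sketch (hypothetical names): a typical cycle with the pool
 * helpers above, keeping the Mali-visible address in a 32-bit mali_dma_addr.
 *
 *   mali_dma_addr phys;
 *   mali_dma_pool pool = mali_dma_pool_create(size, align, boundary);
 *   mali_io_address virt = mali_dma_pool_alloc(pool, &phys);
 *   if (NULL != virt) {
 *           // program the hardware with "phys", access it through "virt"
 *           mali_dma_pool_free(pool, virt, phys);
 *   }
 *   mali_dma_pool_destroy(pool);
 */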
if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_api_version(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
return 0;
}
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+ _mali_uk_get_api_version_v2_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version_v2(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
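
The new wrapper follows the convention used throughout these changes: uk argument structs carry the kernel session pointer in an integer ctx field wide enough for a pointer, written with kargs.ctx = (uintptr_t)session_data and read back with the reverse cast. A round-trip sketch of just that convention, assuming a u64 ctx field as in the uk structs (example_args and the two helpers are illustrative names):

struct example_args {
        u64 ctx; /* kernel session pointer, opaque to user space */
};

static void example_store_ctx(struct example_args *args,
                              struct mali_session_data *session)
{
        /* Widening through uintptr_t keeps the struct layout identical on
         * 32-bit and 64-bit kernels, so pointer width never leaks into
         * the ioctl ABI. */
        args->ctx = (uintptr_t)session;
}

static struct mali_session_data *example_load_ctx(struct example_args *args)
{
        return (struct mali_session_data *)(uintptr_t)args->ctx;
}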
int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
{
_mali_uk_wait_for_notification_s kargs;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_wait_for_notification(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
- kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ kargs.ctx = (uintptr_t)NULL; /* prevent the kernel address from being returned to user space */
if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
} else {
if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
if (0 != get_user(kargs.type, &uargs->type)) {
return -EFAULT;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_user_settings(&kargs);
if (_MALI_OSK_ERR_OK != err) {
return map_errcode(err);
}
- kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ kargs.ctx = 0; /* prevent the kernel address from being returned to user space */
if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
return 0;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_request_high_priority(&kargs);
- kargs.ctx = NULL;
+ kargs.ctx = 0;
return map_errcode(err);
}
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_gp_core_version(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_gp_suspend_response(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_gp_number_of_cores(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
return -EFAULT;
}
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
/* Check if we can access the buffers */
if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
return -EFAULT;
}
- uk_args.ctx = session_data;
+ uk_args.ctx = (uintptr_t)session_data;
err_code = _mali_ukk_map_external_mem(&uk_args);
if (0 != put_user(uk_args.cookie, &argument->cookie)) {
/* Rollback */
_mali_uk_unmap_external_mem_s uk_args_unmap;
- uk_args_unmap.ctx = session_data;
+ uk_args_unmap.ctx = (uintptr_t)session_data;
uk_args_unmap.cookie = uk_args.cookie;
err_code = _mali_ukk_unmap_external_mem(&uk_args_unmap);
if (_MALI_OSK_ERR_OK != err_code) {
return -EFAULT;
}
- uk_args.ctx = session_data;
+ uk_args.ctx = (uintptr_t)session_data;
err_code = _mali_ukk_unmap_external_mem(&uk_args);
/* Return the error that _mali_ukk_free_big_block produced */
return -EFAULT;
}
- uk_args.ctx = session_data;
+ uk_args.ctx = (uintptr_t)session_data;
err_code = _mali_ukk_release_ump_mem(&uk_args);
/* Return the error that _mali_ukk_free_big_block produced */
return -EFAULT;
}
- uk_args.ctx = session_data;
+ uk_args.ctx = (uintptr_t)session_data;
err_code = _mali_ukk_attach_ump_mem(&uk_args);
if (0 != put_user(uk_args.cookie, &argument->cookie)) {
/* Rollback */
_mali_uk_release_ump_mem_s uk_args_unmap;
- uk_args_unmap.ctx = session_data;
+ uk_args_unmap.ctx = (uintptr_t)session_data;
uk_args_unmap.cookie = uk_args.cookie;
err_code = _mali_ukk_release_ump_mem(&uk_args_unmap);
if (_MALI_OSK_ERR_OK != err_code) {
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
{
_mali_uk_dump_mmu_page_table_s kargs;
_mali_osk_errcode_t err;
- void *buffer;
+ void __user *user_buffer;
+ void *buffer = NULL;
int rc = -EFAULT;
/* validate input */
MALI_CHECK_NON_NULL(uargs, -EINVAL);
/* the session_data pointer was validated by caller */
- kargs.buffer = NULL;
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+ goto err_exit;
- /* get location of user buffer */
- if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
- /* get size of mmu page table info buffer from user space */
- if (0 != get_user(kargs.size, &uargs->size)) goto err_exit;
- /* verify we can access the whole of the user buffer */
- if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+ user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+ if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+ goto err_exit;
/* allocate temporary buffer (kernel side) to store mmu page table info */
- MALI_CHECK(kargs.size > 0, -ENOMEM);
- kargs.buffer = _mali_osk_valloc(kargs.size);
- if (NULL == kargs.buffer) {
+ if (0 == kargs.size)
+ return -EINVAL;
+ /* Allow at most 8MiB buffers; this is more than enough to dump a fully
+ * populated page table. */
+ if (kargs.size > SZ_8M)
+ return -EINVAL;
+
+ buffer = _mali_osk_valloc(kargs.size);
+ if (NULL == buffer) {
rc = -ENOMEM;
goto err_exit;
}
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.buffer = (uintptr_t)buffer;
err = _mali_ukk_dump_mmu_page_table(&kargs);
if (_MALI_OSK_ERR_OK != err) {
rc = map_errcode(err);
}
/* copy mmu page table info back to user space and update pointers */
- if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size)) goto err_exit;
- if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
- if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
- if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
- if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+ goto err_exit;
+
+ kargs.register_writes = kargs.register_writes -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+ kargs.page_table_dump = kargs.page_table_dump -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+ goto err_exit;
+
rc = 0;
err_exit:
- if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+ if (buffer) _mali_osk_vfree(buffer);
return rc;
}
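
The pointer fix-up above is plain rebasing arithmetic: register_writes and page_table_dump hold addresses inside the kernel bounce buffer, and subtracting the kernel base and adding the user base yields the matching address inside the buffer just copied to user space. As a sketch, with example_rebase as a hypothetical helper:

static u64 example_rebase(u64 addr_in_kbuf, const void *kbuf,
                          const void __user *ubuf)
{
        u64 offset = addr_in_kbuf - (uintptr_t)kbuf;

        /* Same offset into the dump, user-space base instead of the
         * kernel one. */
        return offset + (uintptr_t)ubuf;
}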
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_pp_number_of_cores(&kargs);
if (_MALI_OSK_ERR_OK != err) {
return map_errcode(err);
}
- kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ kargs.ctx = (uintptr_t)NULL; /* prevent the kernel address from being returned to user space */
if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
return -EFAULT;
}
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_get_pp_core_version(&kargs);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
_mali_ukk_pp_job_disable_wb(&kargs);
return 0;
#include "mali_session.h"
#include "mali_ukk_wrappers.h"
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
-{
- _mali_uk_profiling_start_s kargs;
- _mali_osk_errcode_t err;
-
- MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
- if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s))) {
- return -EFAULT;
- }
-
- kargs.ctx = session_data;
- err = _mali_ukk_profiling_start(&kargs);
- if (_MALI_OSK_ERR_OK != err) {
- return map_errcode(err);
- }
-
- if (0 != put_user(kargs.limit, &uargs->limit)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
{
_mali_uk_profiling_add_event_s kargs;
return -EFAULT;
}
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_profiling_add_event(&kargs);
if (_MALI_OSK_ERR_OK != err) {
return map_errcode(err);
return 0;
}
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
-{
- _mali_uk_profiling_stop_s kargs;
- _mali_osk_errcode_t err;
-
- MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
- kargs.ctx = session_data;
- err = _mali_ukk_profiling_stop(&kargs);
- if (_MALI_OSK_ERR_OK != err) {
- return map_errcode(err);
- }
-
- if (0 != put_user(kargs.count, &uargs->count)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
-{
- _mali_uk_profiling_get_event_s kargs;
- _mali_osk_errcode_t err;
-
- MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
- if (0 != get_user(kargs.index, &uargs->index)) {
- return -EFAULT;
- }
-
- kargs.ctx = session_data;
-
- err = _mali_ukk_profiling_get_event(&kargs);
- if (_MALI_OSK_ERR_OK != err) {
- return map_errcode(err);
- }
-
- kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
- if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s))) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
-{
- _mali_uk_profiling_clear_s kargs;
- _mali_osk_errcode_t err;
-
- MALI_CHECK_NON_NULL(uargs, -EINVAL);
-
- kargs.ctx = session_data;
- err = _mali_ukk_profiling_clear(&kargs);
- if (_MALI_OSK_ERR_OK != err) {
- return map_errcode(err);
- }
-
- return 0;
-}
-
int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
{
_mali_osk_errcode_t err;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
MALI_CHECK_NON_NULL(session_data, -EINVAL);
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_profiling_memory_usage_get(&kargs);
if (_MALI_OSK_ERR_OK != err) {
return map_errcode(err);
}
- kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ kargs.ctx = (uintptr_t)NULL; /* prevent the kernel address from being returned to user space */
if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
return -EFAULT;
}
_mali_uk_sw_counters_report_s kargs;
_mali_osk_errcode_t err;
u32 *counter_buffer;
+ u32 __user *counters;
MALI_CHECK_NON_NULL(uargs, -EINVAL);
return -ENOMEM;
}
- if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters)) {
+ counters = (u32 __user *)(uintptr_t)kargs.counters;
+
+ if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
kfree(counter_buffer);
return -EFAULT;
}
- kargs.ctx = session_data;
- kargs.counters = counter_buffer;
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.counters = (uintptr_t)counter_buffer;
err = _mali_ukk_sw_counters_report(&kargs);
return 0;
}
-
-
int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
{
- u32 type, user_job, point;
- _mali_uk_fence_t uk_fence;
+ _mali_uk_soft_job_start_s kargs;
+ u32 type, point;
+ u64 user_job;
struct mali_timeline_fence fence;
struct mali_soft_job *job = NULL;
u32 __user *job_id_ptr = NULL;
MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
- if (0 != get_user(type, &uargs->type)) return -EFAULT;
- if (0 != get_user(user_job, &uargs->user_job)) return -EFAULT;
- if (0 != get_user(job_id_ptr, &uargs->job_id_ptr)) return -EFAULT;
+ if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+ return -EFAULT;
+ }
+
+ type = kargs.type;
+ user_job = kargs.user_job;
+ job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
- if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
- mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+ mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
- if (MALI_SOFT_JOB_TYPE_USER_SIGNALED < type) {
+ if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
return -EINVAL;
}
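
The rewritten prologue replaces a series of get_user() calls with a single copy_from_user() of the whole argument struct, so user memory is read once and the fields now widened to u64 (user_job, job_id_ptr) survive unchanged on both 32-bit and 64-bit kernels. The pattern in isolation, with a hypothetical struct and names:

struct example_uk_args {
        u64 user_job;   /* user-space value, widened to u64 in the ABI */
        u64 job_id_ptr; /* user-space pointer, carried as a u64 */
        u32 type;
};

static int example_fetch_args(struct example_uk_args __user *uargs,
                              u64 *user_job, u32 __user **job_id_ptr, u32 *type)
{
        struct example_uk_args kargs;

        /* One bulk copy instead of several get_user() round trips. */
        if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
                return -EFAULT;
        }

        *user_job = kargs.user_job;
        *type = kargs.type;
        /* Narrow the u64 back to a native user pointer. */
        *job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;

        return 0;
}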
return -EFAULT;
}
- kargs.ctx = session_data;
+ kargs.ctx = (uintptr_t)session_data;
err = _mali_ukk_vsync_event_report(&kargs);
if (_MALI_OSK_ERR_OK != err) {
return map_errcode(err);
int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
int profiling_memory_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
{
int mali_freq_num;
int i = 0, level = -1;
- int mali_freq_num;
if(freq < 0)
return level;
/* check compatibility */
if (args->version == UMP_IOCTL_API_VERSION) {
DBG_MSG(3, ("API version set to newest %d (compatible)\n",
- GET_VERSION(args->version)));
+ GET_VERSION(args->version)));
args->compatible = 1;
session_data->api_version = args->version;
} else {
DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
- GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
args->compatible = 0;
args->version = UMP_IOCTL_API_VERSION; /* report our version */
}
if (NULL != mem) {
user_interaction->size = mem->size_bytes;
DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
- (ump_secure_id)user_interaction->secure_id,
- (unsigned long)user_interaction->size));
+ (ump_secure_id)user_interaction->secure_id,
+ (unsigned long)user_interaction->size));
ump_random_mapping_put(mem);
ret = _MALI_OSK_ERR_OK;
} else {
user_interaction->size = 0;
DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
- (ump_secure_id)user_interaction->secure_id));
+ (ump_secure_id)user_interaction->secure_id));
}
return ret;
mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
return;
}
mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
return;
}
mem->hw_device = args->new_user;
DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n",
- (ump_secure_id)args->secure_id,
- args->new_user ? "MALI" : "CPU",
- old_user ? "MALI" : "CPU"));
+ (ump_secure_id)args->secure_id,
+ args->new_user ? "MALI" : "CPU",
+ old_user ? "MALI" : "CPU"));
if (!mem->is_cached) {
DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
goto out;
}
if (old_user == args->new_user) {
DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
goto out;
}
if (
(old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
) {
DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
goto out;
}
mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
return;
}
mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
- (ump_secure_id)args->secure_id));
+ (ump_secure_id)args->secure_id));
return;
}
DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n",
- (u32)args->secure_id, (u32) mem->lock_usage));
+ (u32)args->secure_id, (u32) mem->lock_usage));
mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
}
session_data->cookies_map = ump_descriptor_mapping_create(
- UMP_COOKIES_PER_SESSION_INITIAL,
- UMP_COOKIES_PER_SESSION_MAXIMUM);
+ UMP_COOKIES_PER_SESSION_INITIAL,
+ UMP_COOKIES_PER_SESSION_MAXIMUM);
if (NULL == session_data->cookies_map) {
MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
/* Now, ask the active memory backend to do the actual memory allocation */
if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
- new_allocation->size_bytes,
- (unsigned long)user_interaction->size));
+ new_allocation->size_bytes,
+ (unsigned long)user_interaction->size));
_mali_osk_free(new_allocation);
_mali_osk_free(session_memory_element);
return _MALI_OSK_ERR_INVALID_FUNC;
user_interaction->secure_id = new_allocation->secure_id;
user_interaction->size = new_allocation->size_bytes;
DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
- new_allocation->secure_id,
- new_allocation->size_bytes));
+ new_allocation->secure_id,
+ new_allocation->size_bytes));
return _MALI_OSK_ERR_OK;
}
return NULL;
map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED,
- _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
if (NULL != map->lock) {
map->root = RB_ROOT;
#if UMP_RANDOM_MAP_DELAY
map->failed.count++;
if (time_is_before_jiffies(map->failed.timestamp +
- UMP_FAILED_LOOKUP_DELAY * HZ))
- {
+ UMP_FAILED_LOOKUP_DELAY * HZ)) {
/* If it is a long time since last failure, reset
* the counter and skip the delay this time. */
map->failed.count = 0;
new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n",
- mem->secure_id, new_ref));
+ mem->secure_id, new_ref));
if (0 == new_ref) {
DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
typedef struct lock_cmd_priv {
uint32_t msg[128]; /*ioctl args*/
- u32 pid; /*process id*/
+ u32 pid; /*process id*/
} _lock_cmd_priv;
typedef struct lock_ref {
typedef struct umplock_item {
u32 secure_id;
u32 id_ref_count;
+ u32 owner;
_lock_access_usage usage;
_lock_ref references[MAX_PIDS];
struct semaphore item_lock;
struct class *umplock_class;
};
-static char umplock_dev_name[] = "umplock";
-
-int umplock_major = 0;
-module_param(umplock_major, int, S_IRUGO); /* r--r--r-- */
-MODULE_PARM_DESC(umplock_major, "Device major number");
-
-static int umplock_driver_open(struct inode *inode, struct file *filp);
-static int umplock_driver_release(struct inode *inode, struct file *filp);
-static long umplock_driver_ioctl(struct file *f, unsigned int cmd, unsigned long arg);
-
-static struct file_operations umplock_fops = {
- .owner = THIS_MODULE,
- .open = umplock_driver_open,
- .release = umplock_driver_release,
- .unlocked_ioctl = umplock_driver_ioctl,
-};
-
static struct umplock_device umplock_device;
static umplock_device_private device;
+static dev_t umplock_dev;
+static char umplock_dev_name[] = "umplock";
-void umplock_init_locklist(void)
-{
- memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
- atomic_set(&device.sessions, 0);
-}
-
-void umplock_deinit_locklist(void)
-{
- memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
-}
-
-int umplock_device_initialize(void)
-{
- int err;
- dev_t dev = 0;
-
- if (0 == umplock_major) {
- err = alloc_chrdev_region(&dev, 0, 1, umplock_dev_name);
- umplock_major = MAJOR(dev);
- } else {
- dev = MKDEV(umplock_major, 0);
- err = register_chrdev_region(dev, 1, umplock_dev_name);
- }
-
- if (0 == err) {
- memset(&umplock_device, 0, sizeof(umplock_device));
- cdev_init(&umplock_device.cdev, &umplock_fops);
- umplock_device.cdev.owner = THIS_MODULE;
- umplock_device.cdev.ops = &umplock_fops;
-
- err = cdev_add(&umplock_device.cdev, dev, 1);
- if (0 == err) {
- umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
- if (IS_ERR(umplock_device.umplock_class)) {
- err = PTR_ERR(umplock_device.umplock_class);
- } else {
- struct device *mdev;
- mdev = device_create(umplock_device.umplock_class, NULL, dev, NULL, umplock_dev_name);
- if (!IS_ERR(mdev)) {
- return 0; /* all ok */
- }
-
- err = PTR_ERR(mdev);
- class_destroy(umplock_device.umplock_class);
- }
- cdev_del(&umplock_device.cdev);
- }
-
- unregister_chrdev_region(dev, 1);
- }
-
- return 1;
-}
-
-void umplock_device_terminate(void)
-{
- dev_t dev = MKDEV(umplock_major, 0);
-
- device_destroy(umplock_device.umplock_class, dev);
- class_destroy(umplock_device.umplock_class);
-
- cdev_del(&umplock_device.cdev);
- unregister_chrdev_region(dev, 1);
-}
-
-int umplock_constructor(void)
-{
- mutex_init(&device.item_list_lock);
- if (!umplock_device_initialize()) return 1;
- umplock_init_locklist();
-
- return 0;
-}
-void umplock_destructor(void)
-{
- umplock_deinit_locklist();
- umplock_device_terminate();
- mutex_destroy(&device.item_list_lock);
-}
+int umplock_debug_level = 0;
+module_param(umplock_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(umplock_debug_level, "debug verbosity; messages at or below this level are printed");
+
+#define PDEBUG(level, fmt, args...) do { if ((level) <= umplock_debug_level) printk(KERN_DEBUG "umplock: " fmt, ##args); } while (0)
+#define PERROR(fmt, args...) do { printk(KERN_ERR "umplock: " fmt, ##args); } while (0)
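
A usage sketch for the two macros (a hypothetical call site): a PDEBUG message prints only while its level is at or below umplock_debug_level, which, being a writable module parameter, can be raised on a running system; PERROR always prints.

static void example_log_state(u32 secure_id, u32 owner)
{
        /* Silent by default (umplock_debug_level == 0). */
        PDEBUG(1, "secure_id 0x%x currently owned by pid %u\n",
               secure_id, owner);

        /* Always reaches the kernel log. */
        PERROR("unexpected state for secure_id 0x%x\n", secure_id);
}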
int umplock_find_item(u32 secure_id)
{
int i;
for (i = 0; i < MAX_ITEMS; i++) {
- if (device.items[i].secure_id == secure_id) return i;
- }
-
- return -1;
-}
-
-int umplock_find_slot(void)
-{
- int i;
- for (i = 0; i < MAX_ITEMS; i++) {
- if (device.items[i].secure_id == 0) return i;
+ if (device.items[i].secure_id == secure_id) {
+ return i;
+ }
}
return -1;
i = umplock_find_item(lock_item->secure_id);
- if (i < 0)
+ if (i < 0) {
return -1;
+ }
for (j = 0; j < MAX_PIDS; j++) {
if (device.items[i].references[j].pid == lock_cmd->pid) {
{
int i;
- if (pid == 0)
+ if (pid == 0) {
return -1;
+ }
for (i = 0; i < MAX_PIDS; i++) {
- if (device.pids[i] == pid) return i;
+ if (device.pids[i] == pid) {
+ return i;
+ }
}
return -1;
i_index = ref_index = -1;
-#if 0
- if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK: C 0x%x GPU SURFACE\n", lock_item->secure_id);
- else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK: C 0x%x GPU TEXTURE\n", lock_item->secure_id);
- else printk(KERN_DEBUG "UMPLOCK: C 0x%x CPU\n", lock_item->secure_id);
-#endif
-
ret = umplock_find_client_valid(lock_cmd->pid);
if (ret < 0) {
- /*lock request from an invalid client pid, do nothing*/
- return 0;
+ /*lock request from an invalid client pid*/
+ return -EINVAL;
}
ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
if (ret >= 0) {
- if (device.items[i_index].references[ref_index].ref_count == 0)
- device.items[i_index].references[ref_index].ref_count = 1;
} else if ((i_index = umplock_find_item(lock_item->secure_id)) >= 0) {
for (ref_index = 0; ref_index < MAX_PIDS; ref_index++) {
- if (device.items[i_index].references[ref_index].pid == 0) break;
+ if (device.items[i_index].references[ref_index].pid == 0) {
+ break;
+ }
}
if (ref_index < MAX_PIDS) {
device.items[i_index].references[ref_index].pid = lock_cmd->pid;
- device.items[i_index].references[ref_index].ref_count = 1;
+ device.items[i_index].references[ref_index].ref_count = 0;
} else {
- printk(KERN_ERR "UMPLOCK: whoops, item ran out of available reference slot\n");
+ PERROR("whoops, item ran out of available reference slots\n");
+ return -EINVAL;
+
}
} else {
- i_index = umplock_find_slot();
+ i_index = umplock_find_item(0);
if (i_index >= 0) {
device.items[i_index].secure_id = lock_item->secure_id;
- device.items[i_index].id_ref_count = 1;
+ device.items[i_index].id_ref_count = 0;
device.items[i_index].usage = lock_item->usage;
device.items[i_index].references[0].pid = lock_cmd->pid;
- device.items[i_index].references[0].ref_count = 1;
+ device.items[i_index].references[0].ref_count = 0;
sema_init(&device.items[i_index].item_lock, 1);
} else {
- printk(KERN_ERR "UMPLOCK: whoops, ran out of available slots\n");
+ PERROR("whoops, ran out of available slots\n");
+ return -EINVAL;
}
}
static int do_umplock_create(_lock_cmd_priv *lock_cmd)
{
- int ret = 0;
- mutex_lock(&device.item_list_lock);
- ret = do_umplock_create_locked(lock_cmd);
- mutex_unlock(&device.item_list_lock);
- return ret;
+ return 0;
}
static int do_umplock_process(_lock_cmd_priv *lock_cmd)
{
- int ret, i_index, ref_index, ref_count;
+ int ret, i_index, ref_index;
+ _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
mutex_lock(&device.item_list_lock);
- do_umplock_create_locked(lock_cmd);
+ if (0 == lock_item->secure_id) {
+ PERROR("IOCTL_UMPLOCK_PROCESS called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
- ret = umplock_find_client_valid(lock_cmd->pid);
+ ret = do_umplock_create_locked(lock_cmd);
if (ret < 0) {
- /*lock request from an invalid client pid, do nothing*/
mutex_unlock(&device.item_list_lock);
- return 0;
+ return -EINVAL;
}
ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
- ref_count = device.items[i_index].references[ref_index].ref_count;
- if (ret >= 0) {
- if (ref_count == 1) {
- /*add ref before down to wait for the umplock*/
- device.items[i_index].references[ref_index].ref_count++;
- device.items[i_index].id_ref_count++;
- mutex_unlock(&device.item_list_lock);
- if (down_interruptible(&device.items[i_index].item_lock)) {
- /*wait up without hold the umplock. restore previous state and return*/
- mutex_lock(&device.item_list_lock);
- device.items[i_index].references[ref_index].ref_count--;
- device.items[i_index].id_ref_count--;
- if (device.items[i_index].references[ref_index].ref_count == 1) {
- device.items[i_index].references[ref_index].ref_count = 0;
- device.items[i_index].references[ref_index].pid = 0;
- if (device.items[i_index].id_ref_count == 1) {
- device.items[i_index].id_ref_count = 0;
- device.items[i_index].secure_id = 0;
- }
- }
- mutex_unlock(&device.item_list_lock);
- return -ERESTARTSYS;
- }
- mutex_lock(&device.item_list_lock);
- } else {
- /*already got the umplock, add ref*/
- device.items[i_index].references[ref_index].ref_count++;
- device.items[i_index].id_ref_count++;
- }
-#if 0
- if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK: P 0x%x GPU SURFACE\n", lock_item->secure_id);
- else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK: P 0x%x GPU TEXTURE\n", lock_item->secure_id);
- else printk(KERN_DEBUG "UMPLOCK: P 0x%x CPU\n", lock_item->secure_id);
-#endif
- } else {
+ if (ret < 0) {
/*fail to find a item*/
- printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_PROCESS called with invalid parameter\n");
+ PERROR("IOCTL_UMPLOCK_PROCESS called with invalid parameter, pid: %d\n", lock_cmd->pid);
mutex_unlock(&device.item_list_lock);
return -EINVAL;
}
+ device.items[i_index].references[ref_index].ref_count++;
+ device.items[i_index].id_ref_count++;
+ PDEBUG(1, "try to lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ if (lock_cmd->pid == device.items[i_index].owner) {
+ PDEBUG(1, "already own the lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
+
+ mutex_unlock(&device.item_list_lock);
+ if (down_interruptible(&device.items[i_index].item_lock)) {
+ /*interrupted without holding the umplock; restore previous state and return*/
+ mutex_lock(&device.item_list_lock);
+ device.items[i_index].references[ref_index].ref_count--;
+ device.items[i_index].id_ref_count--;
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ device.items[i_index].references[ref_index].pid = 0;
+ if (0 == device.items[i_index].id_ref_count) {
+ PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ device.items[i_index].secure_id = 0;
+ }
+ }
+
+ PERROR("failed lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ mutex_unlock(&device.item_list_lock);
+ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&device.item_list_lock);
+ PDEBUG(1, "got lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+ device.items[i_index].owner = lock_cmd->pid;
mutex_unlock(&device.item_list_lock);
+
return 0;
}
static int do_umplock_release(_lock_cmd_priv *lock_cmd)
{
- int i_index, ref_index, ref_count;
- int ret;
+ int ret, i_index, ref_index;
+ _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
mutex_lock(&device.item_list_lock);
+
+ if (0 == lock_item->secure_id) {
+ PERROR("IOCTL_UMPLOCK_RELEASE called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+
ret = umplock_find_client_valid(lock_cmd->pid);
if (ret < 0) {
- /*lock request from an invalid client pid, do nothing*/
+ /*release request from an invalid client pid*/
mutex_unlock(&device.item_list_lock);
- return 0;
+ return -EPERM;
}
i_index = ref_index = -1;
ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+ if (ret < 0) {
+ /*fail to find item*/
+ PERROR("IOCTL_UMPLOCK_RELEASE called with invalid parameter pid: %d, secid: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
- if (ret >= 0) {
- device.items[i_index].references[ref_index].ref_count--;
- ref_count = device.items[i_index].references[ref_index].ref_count;
- device.items[i_index].id_ref_count--;
+ /* if the lock is not owned by this process */
+ if (lock_cmd->pid != device.items[i_index].owner) {
+ mutex_unlock(&device.item_list_lock);
+ return -EPERM;
+ }
-#if 0
- if (lock_item->usage == 1) printk(KERN_DEBUG "UMPLOCK: R 0x%x GPU SURFACE\n", lock_item->secure_id);
- else if (lock_item->usage == 2) printk(KERN_DEBUG "UMPLOCK: R 0x%x GPU TEXTURE\n", lock_item->secure_id);
- else printk(KERN_DEBUG "UMPLOCK: R 0x%x CPU\n", lock_item->secure_id);
-#endif
- /*reached the last reference to the umplock*/
- if (ref_count == 1) {
- /*release the umplock*/
- up(&device.items[i_index].item_lock);
+ /* if the ref_count is 0, there is nothing to unlock; just return */
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
- device.items[i_index].references[ref_index].ref_count = 0;
- device.items[i_index].references[ref_index].pid = 0;
- if (device.items[i_index].id_ref_count == 1) {
- device.items[i_index].id_ref_count = 0;
- device.items[i_index].secure_id = 0;
- }
+ device.items[i_index].references[ref_index].ref_count--;
+ device.items[i_index].id_ref_count--;
+ PDEBUG(1, "unlock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ device.items[i_index].references[ref_index].pid = 0;
+ if (0 == device.items[i_index].id_ref_count) {
+ PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ device.items[i_index].secure_id = 0;
}
- } else {
- /*fail to find item*/
- printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_RELEASE called with invalid parameter pid : %d tgid :%d secid: %d \n", lock_cmd->pid, current->tgid, ((_lock_item_s *)&lock_cmd->msg)->secure_id);
- mutex_unlock(&device.item_list_lock);
- return -EINVAL;
+ device.items[i_index].owner = 0;
+ up(&device.items[i_index].item_lock);
}
mutex_unlock(&device.item_list_lock);
+
return 0;
}
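
Taken together with do_umplock_process(), the release path completes the client protocol: each PROCESS call bumps the per-pid and per-item reference counts and blocks on the item semaphore unless the caller already owns it, and each RELEASE drops one reference, with only the final drop clearing the owner and waking the next waiter. A hedged user-space sketch of that protocol; the device node path and ioctl request codes are assumptions (only IOCTL_UMPLOCK_PROCESS and IOCTL_UMPLOCK_RELEASE are named in the driver's own messages), _lock_item_s comes from the driver's uapi header, and error handling is trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static void example_lock_buffer(unsigned int secure_id)
{
        _lock_item_s item;
        int fd = open("/dev/umplock", O_RDWR); /* assumed node name */

        item.secure_id = secure_id;
        item.usage = 1; /* GPU surface, per the old debug messages */

        ioctl(fd, IOCTL_UMPLOCK_PROCESS, &item); /* blocks until the owner releases */
        /* ... access the shared buffer ... */
        ioctl(fd, IOCTL_UMPLOCK_RELEASE, &item); /* final release wakes a waiter */

        close(fd);
}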
{
int i;
- printk(KERN_DEBUG "UMPLOCK: ZAP ALL ENTRIES!\n");
+ PDEBUG(1, "ZAP ALL ENTRIES!\n");
mutex_lock(&device.item_list_lock);
for (i = 0; i < MAX_ITEMS; i++) {
device.items[i].secure_id = 0;
- memset(&device.items[i].references, 0, sizeof(_lock_ref)*MAX_PIDS);
+ memset(&device.items[i].references, 0, sizeof(_lock_ref) * MAX_PIDS);
sema_init(&device.items[i].item_lock, 1);
}
- mutex_unlock(&device.item_list_lock);
for (i = 0; i < MAX_PIDS; i++) {
device.pids[i] = 0;
}
+ mutex_unlock(&device.item_list_lock);
+
return 0;
}
{
int i, j;
- printk("dump all the items\n");
-
mutex_lock(&device.item_list_lock);
+ PERROR("dump all the items begin\n");
for (i = 0; i < MAX_ITEMS; i++) {
for (j = 0; j < MAX_PIDS; j++) {
if (device.items[i].secure_id != 0 && device.items[i].references[j].pid != 0) {
- printk("item[%d]->secure_id=%d\t reference[%d].ref_count=%d.pid=%d\n",
+ PERROR("item[%d]->secure_id=0x%x, owner=%d\t reference[%d].ref_count=%d.pid=%d\n",
i,
device.items[i].secure_id,
+ device.items[i].owner,
j,
device.items[i].references[j].ref_count,
device.items[i].references[j].pid);
}
}
}
+ PERROR("dump all the items end\n");
mutex_unlock(&device.item_list_lock);
return 0;
mutex_lock(&device.item_list_lock);
for (i = 0; i < MAX_PIDS; i++) {
if (device.pids[i] == lock_cmd->pid) {
+ mutex_unlock(&device.item_list_lock);
return 0;
}
}
}
mutex_unlock(&device.item_list_lock);
if (i == MAX_PIDS) {
- printk(KERN_ERR "Oops, Run out of cient slots\n ");
+ PERROR("Oops, Run out of client slots\n ");
+ return -EINVAL;
}
return 0;
}
/*walk through umplock item list and release reference attached to this client*/
for (i_index = 0; i_index < MAX_ITEMS; i_index++) {
lock_item->secure_id = device.items[i_index].secure_id;
+
/*find the item index and reference slot for the lock_item*/
ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
}
while (device.items[i_index].references[ref_index].ref_count) {
/*release references on this client*/
+
+ PDEBUG(1, "delete client, pid: %d, ref_count: %d\n", lock_cmd->pid, device.items[i_index].references[ref_index].ref_count);
+
mutex_unlock(&device.item_list_lock);
do_umplock_release(lock_cmd);
mutex_lock(&device.item_list_lock);
_lock_cmd_priv lock_cmd;
atomic_inc(&device.sessions);
- printk(KERN_DEBUG "UMPLOCK: OPEN SESSION (%i references)\n", atomic_read(&device.sessions));
+ PDEBUG(1, "OPEN SESSION (%i references)\n", atomic_read(&device.sessions));
lock_cmd.pid = (u32)current->tgid;
do_umplock_client_add(&lock_cmd);
static int umplock_driver_release(struct inode *inode, struct file *filp)
{
+ int sessions = 0;
_lock_cmd_priv lock_cmd;
lock_cmd.pid = (u32)current->tgid;
do_umplock_client_delete(&lock_cmd);
+ mutex_lock(&device.item_list_lock);
atomic_dec(&device.sessions);
- printk(KERN_DEBUG "UMPLOCK: CLOSE SESSION (%i references)\n", atomic_read(&device.sessions));
- if (atomic_read(&device.sessions) == 0) {
+ sessions = atomic_read(&device.sessions);
+ PDEBUG(1, "CLOSE SESSION (%i references)\n", sessions);
+ mutex_unlock(&device.item_list_lock);
+ if (sessions == 0) {
do_umplock_zap();
}
return 0;
}
+static struct file_operations umplock_fops = {
+ .owner = THIS_MODULE,
+ .open = umplock_driver_open,
+ .release = umplock_driver_release,
+ .unlocked_ioctl = umplock_driver_ioctl,
+};
+
+int umplock_device_initialize(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&umplock_dev, 0, 1, umplock_dev_name);
+
+ if (0 == err) {
+ memset(&umplock_device, 0, sizeof(umplock_device));
+ cdev_init(&umplock_device.cdev, &umplock_fops);
+ umplock_device.cdev.owner = THIS_MODULE;
+ umplock_device.cdev.ops = &umplock_fops;
+
+ err = cdev_add(&umplock_device.cdev, umplock_dev, 1);
+ if (0 == err) {
+ umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
+ if (IS_ERR(umplock_device.umplock_class)) {
+ err = PTR_ERR(umplock_device.umplock_class);
+ } else {
+ struct device *mdev;
+ mdev = device_create(umplock_device.umplock_class, NULL, umplock_dev, NULL, umplock_dev_name);
+ if (!IS_ERR(mdev)) {
+ return 0; /* all ok */
+ }
+
+ err = PTR_ERR(mdev);
+ class_destroy(umplock_device.umplock_class);
+ }
+ cdev_del(&umplock_device.cdev);
+ }
+
+ unregister_chrdev_region(umplock_dev, 1);
+ } else {
+ PERROR("alloc chardev region failed\n");
+ }
+
+ return err;
+}
+
+void umplock_device_terminate(void)
+{
+ device_destroy(umplock_device.umplock_class, umplock_dev);
+ class_destroy(umplock_device.umplock_class);
+
+ cdev_del(&umplock_device.cdev);
+ unregister_chrdev_region(umplock_dev, 1);
+}
+
static int __init umplock_initialize_module(void)
{
- printk(KERN_DEBUG "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__);
+ PDEBUG(1, "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__);
- if (!umplock_constructor()) {
- printk(KERN_ERR "UMP lock device driver init failed\n");
+ mutex_init(&device.item_list_lock);
+ if (umplock_device_initialize() != 0) {
+ PERROR("UMP lock device driver init failed\n");
return -ENOTTY;
}
+ memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+ memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+ atomic_set(&device.sessions, 0);
- printk(KERN_DEBUG "UMP lock device driver loaded\n");
+ PDEBUG(1, "UMP lock device driver loaded\n");
return 0;
}
static void __exit umplock_cleanup_module(void)
{
- printk(KERN_DEBUG "unloading UMP lock module\n");
- umplock_destructor();
- printk(KERN_DEBUG "UMP lock module unloaded\n");
+ PDEBUG(1, "unloading UMP lock module\n");
+
+ memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+ memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+ umplock_device_terminate();
+ mutex_destroy(&device.item_list_lock);
+
+ PDEBUG(1, "UMP lock module unloaded\n");
}
module_init(umplock_initialize_module);