gpu: add mali450 support for Q [1/4]
author Jiyu Yang <Jiyu.Yang@amlogic.com>
Wed, 15 Jan 2020 12:13:40 +0000 (20:13 +0800)
committer Jiyu Yang <jiyu.yang@amlogic.com>
Mon, 20 Jan 2020 12:20:01 +0000 (04:20 -0800)
PD#SWPL-9341

Problem:
No driver version for Android Q on ampere.

Solution:
Add the utgard r10p0 driver for ampere.

Verify:
ampere

Change-Id: I434613e725f75905c3182a87c73c3fa350f7c486
Signed-off-by: Jiyu Yang <jiyu.yang@amlogic.com>
177 files changed:
mali/.version
mali/common/mali_control_timer.c
mali/common/mali_group.c
mali/common/mali_osk.h
mali/common/mali_osk_types.h
mali/common/mali_timeline.c
mali/common/mali_timeline.h
mali/linux/mali_internal_sync.c
mali/linux/mali_internal_sync.h
mali/linux/mali_memory_os_alloc.c
mali/linux/mali_memory_secure.c
mali/linux/mali_memory_swap_alloc.c
mali/linux/mali_osk_notification.c
mali/linux/mali_osk_timers.c
mali/linux/mali_sync.c
utgard/r10p0 [new symlink]
utgard/r8p0 [deleted symlink]
utgard/r8p0/.version [new file with mode: 0755]
utgard/r8p0/Kbuild [new file with mode: 0755]
utgard/r8p0/Kconfig [new file with mode: 0755]
utgard/r8p0/Makefile [new file with mode: 0755]
utgard/r8p0/clean.sh [new file with mode: 0755]
utgard/r8p0/common/mali_broadcast.c [new file with mode: 0755]
utgard/r8p0/common/mali_broadcast.h [new file with mode: 0755]
utgard/r8p0/common/mali_control_timer.c [new file with mode: 0755]
utgard/r8p0/common/mali_control_timer.h [new file with mode: 0755]
utgard/r8p0/common/mali_dlbu.c [new file with mode: 0755]
utgard/r8p0/common/mali_dlbu.h [new file with mode: 0755]
utgard/r8p0/common/mali_dvfs_policy.c [new file with mode: 0755]
utgard/r8p0/common/mali_dvfs_policy.h [new file with mode: 0755]
utgard/r8p0/common/mali_executor.c [new file with mode: 0755]
utgard/r8p0/common/mali_executor.h [new file with mode: 0755]
utgard/r8p0/common/mali_gp.c [new file with mode: 0755]
utgard/r8p0/common/mali_gp.h [new file with mode: 0755]
utgard/r8p0/common/mali_gp_job.c [new file with mode: 0755]
utgard/r8p0/common/mali_gp_job.h [new file with mode: 0755]
utgard/r8p0/common/mali_group.c [new file with mode: 0755]
utgard/r8p0/common/mali_group.h [new file with mode: 0755]
utgard/r8p0/common/mali_hw_core.c [new file with mode: 0755]
utgard/r8p0/common/mali_hw_core.h [new file with mode: 0755]
utgard/r8p0/common/mali_kernel_common.h [new file with mode: 0755]
utgard/r8p0/common/mali_kernel_core.c [new file with mode: 0755]
utgard/r8p0/common/mali_kernel_core.h [new file with mode: 0644]
utgard/r8p0/common/mali_kernel_utilization.c [new file with mode: 0755]
utgard/r8p0/common/mali_kernel_utilization.h [new file with mode: 0755]
utgard/r8p0/common/mali_kernel_vsync.c [new file with mode: 0755]
utgard/r8p0/common/mali_l2_cache.c [new file with mode: 0755]
utgard/r8p0/common/mali_l2_cache.h [new file with mode: 0755]
utgard/r8p0/common/mali_mem_validation.c [new file with mode: 0644]
utgard/r8p0/common/mali_mem_validation.h [new file with mode: 0644]
utgard/r8p0/common/mali_mmu.c [new file with mode: 0755]
utgard/r8p0/common/mali_mmu.h [new file with mode: 0755]
utgard/r8p0/common/mali_mmu_page_directory.c [new file with mode: 0644]
utgard/r8p0/common/mali_mmu_page_directory.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk_bitops.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk_list.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk_mali.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk_profiling.h [new file with mode: 0755]
utgard/r8p0/common/mali_osk_types.h [new file with mode: 0755]
utgard/r8p0/common/mali_pm.c [new file with mode: 0755]
utgard/r8p0/common/mali_pm.h [new file with mode: 0755]
utgard/r8p0/common/mali_pm_domain.c [new file with mode: 0755]
utgard/r8p0/common/mali_pm_domain.h [new file with mode: 0755]
utgard/r8p0/common/mali_pm_metrics.c [new file with mode: 0644]
utgard/r8p0/common/mali_pm_metrics.h [new file with mode: 0644]
utgard/r8p0/common/mali_pmu.c [new file with mode: 0755]
utgard/r8p0/common/mali_pmu.h [new file with mode: 0755]
utgard/r8p0/common/mali_pp.c [new file with mode: 0755]
utgard/r8p0/common/mali_pp.h [new file with mode: 0755]
utgard/r8p0/common/mali_pp_job.c [new file with mode: 0755]
utgard/r8p0/common/mali_pp_job.h [new file with mode: 0755]
utgard/r8p0/common/mali_scheduler.c [new file with mode: 0755]
utgard/r8p0/common/mali_scheduler.h [new file with mode: 0755]
utgard/r8p0/common/mali_scheduler_types.h [new file with mode: 0755]
utgard/r8p0/common/mali_session.c [new file with mode: 0755]
utgard/r8p0/common/mali_session.h [new file with mode: 0755]
utgard/r8p0/common/mali_soft_job.c [new file with mode: 0755]
utgard/r8p0/common/mali_soft_job.h [new file with mode: 0755]
utgard/r8p0/common/mali_spinlock_reentrant.c [new file with mode: 0755]
utgard/r8p0/common/mali_spinlock_reentrant.h [new file with mode: 0755]
utgard/r8p0/common/mali_timeline.c [new file with mode: 0755]
utgard/r8p0/common/mali_timeline.h [new file with mode: 0755]
utgard/r8p0/common/mali_timeline_fence_wait.c [new file with mode: 0755]
utgard/r8p0/common/mali_timeline_fence_wait.h [new file with mode: 0755]
utgard/r8p0/common/mali_timeline_sync_fence.c [new file with mode: 0755]
utgard/r8p0/common/mali_timeline_sync_fence.h [new file with mode: 0755]
utgard/r8p0/common/mali_ukk.h [new file with mode: 0755]
utgard/r8p0/common/mali_user_settings_db.c [new file with mode: 0755]
utgard/r8p0/common/mali_user_settings_db.h [new file with mode: 0755]
utgard/r8p0/include/linux/mali/mali_utgard.h [new file with mode: 0755]
utgard/r8p0/include/linux/mali/mali_utgard_ioctl.h [new file with mode: 0755]
utgard/r8p0/include/linux/mali/mali_utgard_profiling_events.h [new file with mode: 0755]
utgard/r8p0/include/linux/mali/mali_utgard_profiling_gator_api.h [new file with mode: 0755]
utgard/r8p0/include/linux/mali/mali_utgard_uk_types.h [new file with mode: 0755]
utgard/r8p0/linux/license/gpl/mali_kernel_license.h [new file with mode: 0755]
utgard/r8p0/linux/mali_devfreq.c [new file with mode: 0644]
utgard/r8p0/linux/mali_devfreq.h [new file with mode: 0644]
utgard/r8p0/linux/mali_device_pause_resume.c [new file with mode: 0755]
utgard/r8p0/linux/mali_dma_fence.c [new file with mode: 0755]
utgard/r8p0/linux/mali_dma_fence.h [new file with mode: 0755]
utgard/r8p0/linux/mali_internal_sync.c [new file with mode: 0644]
utgard/r8p0/linux/mali_internal_sync.h [new file with mode: 0755]
utgard/r8p0/linux/mali_kernel_linux.c [new file with mode: 0644]
utgard/r8p0/linux/mali_kernel_linux.h [new file with mode: 0755]
utgard/r8p0/linux/mali_kernel_sysfs.c [new file with mode: 0755]
utgard/r8p0/linux/mali_kernel_sysfs.h [new file with mode: 0755]
utgard/r8p0/linux/mali_linux_trace.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_block_alloc.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_block_alloc.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_cow.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_cow.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_defer_bind.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_defer_bind.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_dma_buf.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_dma_buf.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_external.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_external.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_manager.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_manager.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_os_alloc.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_os_alloc.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_secure.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_secure.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_swap_alloc.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_swap_alloc.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_types.h [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_ump.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_ump.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_util.c [new file with mode: 0755]
utgard/r8p0/linux/mali_memory_util.h [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_virtual.c [new file with mode: 0644]
utgard/r8p0/linux/mali_memory_virtual.h [new file with mode: 0644]
utgard/r8p0/linux/mali_osk_atomics.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_bitmap.c [new file with mode: 0644]
utgard/r8p0/linux/mali_osk_irq.c [new file with mode: 0644]
utgard/r8p0/linux/mali_osk_locks.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_locks.h [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_low_level_mem.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_mali.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_math.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_memory.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_misc.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_notification.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_pm.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_profiling.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_specific.h [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_time.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_timers.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_wait_queue.c [new file with mode: 0755]
utgard/r8p0/linux/mali_osk_wq.c [new file with mode: 0755]
utgard/r8p0/linux/mali_pmu_power_up_down.c [new file with mode: 0755]
utgard/r8p0/linux/mali_profiling_events.h [new file with mode: 0755]
utgard/r8p0/linux/mali_profiling_gator_api.h [new file with mode: 0755]
utgard/r8p0/linux/mali_profiling_internal.c [new file with mode: 0755]
utgard/r8p0/linux/mali_profiling_internal.h [new file with mode: 0755]
utgard/r8p0/linux/mali_sync.c [new file with mode: 0755]
utgard/r8p0/linux/mali_sync.h [new file with mode: 0755]
utgard/r8p0/linux/mali_uk_types.h [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_core.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_gp.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_mem.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_pp.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_profiling.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_soft_job.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_timeline.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_vsync.c [new file with mode: 0755]
utgard/r8p0/linux/mali_ukk_wrappers.h [new file with mode: 0755]
utgard/r8p0/readme.txt [new file with mode: 0755]
utgard/r8p0/regs/mali_200_regs.h [new file with mode: 0755]
utgard/r8p0/regs/mali_gp_regs.h [new file with mode: 0755]
utgard/r8p0/timestamp-arm11-cc/mali_timestamp.c [new file with mode: 0755]
utgard/r8p0/timestamp-arm11-cc/mali_timestamp.h [new file with mode: 0755]
utgard/r8p0/timestamp-default/mali_timestamp.c [new file with mode: 0755]
utgard/r8p0/timestamp-default/mali_timestamp.h [new file with mode: 0755]

index f84a6cb2814272caea968a29f8adc34995669f72..2b6cd95df3ec7c1a91e5b85365b346322ab93250 100755 (executable)
@@ -1 +1 @@
-r8p0-01rel0
+r10p0-00rel0
index fc6ceb43fbbb2ae1d89e447a646bb87fdbb7d275..c7ff928e9d8e015f7898b79b43d22f3ed0eab2d9 100755 (executable)
@@ -64,13 +64,17 @@ _mali_osk_errcode_t mali_control_timer_init(void)
                        MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
                }
        }
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+       mali_control_timer = _mali_osk_timer_init(mali_control_timer_callback);
+#else
        mali_control_timer = _mali_osk_timer_init();
+#endif
        if (NULL == mali_control_timer) {
                return _MALI_OSK_ERR_FAULT;
        }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
        _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
-
+#endif
        return _MALI_OSK_ERR_OK;
 }
 
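For context: Linux 4.15 dropped the old init_timer() interface (and the data word in struct timer_list) in favour of timer_setup(), whose callback receives the struct timer_list itself. The hunk above therefore splits timer creation into two shapes. Below is a minimal sketch of how the OSK wrapper can absorb that change; the _mali_osk_timer_t layout, kzalloc usage, and dispatch helpers are assumptions for illustration, not the driver's actual mali_osk_timers.c:

/*
 * Sketch only: a plausible _mali_osk_timer_* shim for both kernel
 * generations. Struct layout and allocation are assumed, not taken
 * from the driver source.
 */
#include <linux/version.h>
#include <linux/timer.h>
#include <linux/slab.h>

typedef void (*_mali_osk_timer_callback_t)(void *data);

typedef struct _mali_osk_timer {
	struct timer_list timer;                /* kernel timer we wrap */
	_mali_osk_timer_callback_t callback;    /* driver-level callback */
	void *data;                             /* driver-level callback data */
} _mali_osk_timer_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* 4.15+: the callback gets the timer_list; from_timer() recovers the wrapper. */
static void _mali_osk_timer_dispatch(struct timer_list *t)
{
	_mali_osk_timer_t *tim = from_timer(tim, t, timer);

	tim->callback(tim->data);
}

_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback)
{
	_mali_osk_timer_t *tim = kzalloc(sizeof(*tim), GFP_KERNEL);

	if (NULL != tim) {
		tim->callback = callback;
		timer_setup(&tim->timer, _mali_osk_timer_dispatch, 0);
	}
	return tim;
}

void _mali_osk_timer_setcallback_data(_mali_osk_timer_t *tim, void *data)
{
	tim->data = data;
}
#else
/* Pre-4.15: init_timer() plus the unsigned long data slot. */
static void _mali_osk_timer_dispatch(unsigned long data)
{
	_mali_osk_timer_t *tim = (_mali_osk_timer_t *)data;

	tim->callback(tim->data);
}

_mali_osk_timer_t *_mali_osk_timer_init(void)
{
	_mali_osk_timer_t *tim = kzalloc(sizeof(*tim), GFP_KERNEL);

	if (NULL != tim)
		init_timer(&tim->timer);
	return tim;
}

void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim,
				 _mali_osk_timer_callback_t callback, void *data)
{
	tim->callback = callback;
	tim->data = data;
	tim->timer.function = _mali_osk_timer_dispatch;
	tim->timer.data = (unsigned long)tim;
}
#endif

With a wrapper along these lines, callers only ever guard the init/setcallback pair, which is exactly the pattern used above and repeated for the group timeout timer further down.
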
index 287863953f7b44572ee70eae0371aaef5a26311d..0364da0bf2cb74fea037fb9f5a60d2d66d9ba2bf 100755 (executable)
@@ -7,12 +7,6 @@
  * A copy of the licence is included with the program, and can also be obtained from Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
-
-#include <linux/types.h>
-#include <linux/version.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29))
-#include <mach/cpu.h>
-#endif
 #include "mali_kernel_common.h"
 #include "mali_group.h"
 #include "mali_osk.h"
@@ -28,7 +22,6 @@
 #include "mali_pm_domain.h"
 #include "mali_pm.h"
 #include "mali_executor.h"
-#include <mali_platform.h>
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
 #include <linux/sched.h>
@@ -51,7 +44,7 @@ int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
 static void mali_group_bottom_half_mmu(void *data);
 static void mali_group_bottom_half_gp(void *data);
 static void mali_group_bottom_half_pp(void *data);
-static void mali_group_timeout(void *data);
+static void mali_group_timeout(void *callback_data);
 static void mali_group_reset_pp(struct mali_group *group);
 static void mali_group_reset_mmu(struct mali_group *group);
 
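The renamed prototype above keeps its void * signature because, on both sides of 4.15, the driver-level callback is handed the stored pointer rather than the raw kernel timer; the data to callback_data rename is cosmetic. In caller terms (hypothetical names, ctx standing in for the group pointer), the two shapes used throughout this patch are:

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	timer = _mali_osk_timer_init(my_timeout_callback);     /* callback bound at init */
	_mali_osk_timer_setcallback_data(timer, (void *)ctx);  /* data attached separately */
#else
	timer = _mali_osk_timer_init();
	_mali_osk_timer_setcallback(timer, my_timeout_callback, (void *)ctx);
#endif
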
@@ -59,473 +52,481 @@ static void mali_group_activate_page_directory(struct mali_group *group, struct
 static void mali_group_recovery_reset(struct mali_group *group);
 
 struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
-        struct mali_dlbu_core *dlbu,
-        struct mali_bcast_unit *bcast,
-        u32 domain_index)
+                                    struct mali_dlbu_core *dlbu,
+                                    struct mali_bcast_unit *bcast,
+                                    u32 domain_index)
 {
-    struct mali_group *group = NULL;
-
-    if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
-        MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
-        return NULL;
-    }
-
-    group = _mali_osk_calloc(1, sizeof(struct mali_group));
-    if (NULL != group) {
-        group->timeout_timer = _mali_osk_timer_init();
-        if (NULL != group->timeout_timer) {
-            _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
-
-            group->l2_cache_core[0] = core;
-            _mali_osk_list_init(&group->group_list);
-            _mali_osk_list_init(&group->executor_list);
-            _mali_osk_list_init(&group->pm_domain_list);
-            group->bcast_core = bcast;
-            group->dlbu_core = dlbu;
-
-            /* register this object as a part of the correct power domain */
-            if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
-                group->pm_domain = mali_pm_register_group(domain_index, group);
-
-            mali_global_groups[mali_global_num_groups] = group;
-            mali_global_num_groups++;
-
-            return group;
-        }
-        _mali_osk_free(group);
-    }
-
-    return NULL;
+       struct mali_group *group = NULL;
+
+       if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+               MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+               return NULL;
+       }
+
+       group = _mali_osk_calloc(1, sizeof(struct mali_group));
+       if (NULL != group) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+               group->timeout_timer = _mali_osk_timer_init(mali_group_timeout);
+#else
+               group->timeout_timer = _mali_osk_timer_init();
+#endif
+               if (NULL != group->timeout_timer) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+                       _mali_osk_timer_setcallback_data(group->timeout_timer, (void *)group);
+#else
+                       _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+#endif
+                       group->l2_cache_core[0] = core;
+                       _mali_osk_list_init(&group->group_list);
+                       _mali_osk_list_init(&group->executor_list);
+                       _mali_osk_list_init(&group->pm_domain_list);
+                       group->bcast_core = bcast;
+                       group->dlbu_core = dlbu;
+
+                       /* register this object as a part of the correct power domain */
+                       if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+                               group->pm_domain = mali_pm_register_group(domain_index, group);
+
+                       mali_global_groups[mali_global_num_groups] = group;
+                       mali_global_num_groups++;
+
+                       return group;
+               }
+               _mali_osk_free(group);
+       }
+
+       return NULL;
 }
 
 void mali_group_delete(struct mali_group *group)
 {
-    u32 i;
-
-    MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
-                mali_group_core_description(group)));
-
-    MALI_DEBUG_ASSERT(NULL == group->parent_group);
-    MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
-
-    /* Delete the resources that this group owns */
-    if (NULL != group->gp_core) {
-        mali_gp_delete(group->gp_core);
-    }
-
-    if (NULL != group->pp_core) {
-        mali_pp_delete(group->pp_core);
-    }
-
-    if (NULL != group->mmu) {
-        mali_mmu_delete(group->mmu);
-    }
-
-    if (mali_group_is_virtual(group)) {
-        /* Remove all groups from virtual group */
-        struct mali_group *child;
-        struct mali_group *temp;
-
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-            child->parent_group = NULL;
-            mali_group_delete(child);
-        }
-
-        mali_dlbu_delete(group->dlbu_core);
-
-        if (NULL != group->bcast_core) {
-            mali_bcast_unit_delete(group->bcast_core);
-        }
-    }
-
-    for (i = 0; i < mali_global_num_groups; i++) {
-        if (mali_global_groups[i] == group) {
-            mali_global_groups[i] = NULL;
-            mali_global_num_groups--;
-
-            if (i != mali_global_num_groups) {
-                /* We removed a group from the middle of the array -- move the last
-                 * group to the current position to close the gap */
-                mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
-                mali_global_groups[mali_global_num_groups] = NULL;
-            }
-
-            break;
-        }
-    }
-
-    if (NULL != group->timeout_timer) {
-        _mali_osk_timer_del(group->timeout_timer);
-        _mali_osk_timer_term(group->timeout_timer);
-    }
-
-    if (NULL != group->bottom_half_work_mmu) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
-    }
-
-    if (NULL != group->bottom_half_work_gp) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
-    }
-
-    if (NULL != group->bottom_half_work_pp) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
-    }
-
-    _mali_osk_free(group);
+       u32 i;
+
+       MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+                            mali_group_core_description(group)));
+
+       MALI_DEBUG_ASSERT(NULL == group->parent_group);
+       MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+       /* Delete the resources that this group owns */
+       if (NULL != group->gp_core) {
+               mali_gp_delete(group->gp_core);
+       }
+
+       if (NULL != group->pp_core) {
+               mali_pp_delete(group->pp_core);
+       }
+
+       if (NULL != group->mmu) {
+               mali_mmu_delete(group->mmu);
+       }
+
+       if (mali_group_is_virtual(group)) {
+               /* Remove all groups from virtual group */
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       child->parent_group = NULL;
+                       mali_group_delete(child);
+               }
+
+               mali_dlbu_delete(group->dlbu_core);
+
+               if (NULL != group->bcast_core) {
+                       mali_bcast_unit_delete(group->bcast_core);
+               }
+       }
+
+       for (i = 0; i < mali_global_num_groups; i++) {
+               if (mali_global_groups[i] == group) {
+                       mali_global_groups[i] = NULL;
+                       mali_global_num_groups--;
+
+                       if (i != mali_global_num_groups) {
+                               /* We removed a group from the middle of the array -- move the last
+                                * group to the current position to close the gap */
+                               mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+                               mali_global_groups[mali_global_num_groups] = NULL;
+                       }
+
+                       break;
+               }
+       }
+
+       if (NULL != group->timeout_timer) {
+               _mali_osk_timer_del(group->timeout_timer);
+               _mali_osk_timer_term(group->timeout_timer);
+       }
+
+       if (NULL != group->bottom_half_work_mmu) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
+
+       if (NULL != group->bottom_half_work_gp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
+
+       if (NULL != group->bottom_half_work_pp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
+
+       _mali_osk_free(group);
 }
 
 _mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
 {
-    /* This group object now owns the MMU core object */
-    group->mmu = mmu_core;
-    group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
-    if (NULL == group->bottom_half_work_mmu) {
-        return _MALI_OSK_ERR_FAULT;
-    }
-    return _MALI_OSK_ERR_OK;
+       /* This group object now owns the MMU core object */
+       group->mmu = mmu_core;
+       group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+       if (NULL == group->bottom_half_work_mmu) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
 }
 
 void mali_group_remove_mmu_core(struct mali_group *group)
 {
-    /* This group object no longer owns the MMU core object */
-    group->mmu = NULL;
-    if (NULL != group->bottom_half_work_mmu) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
-    }
+       /* This group object no longer owns the MMU core object */
+       group->mmu = NULL;
+       if (NULL != group->bottom_half_work_mmu) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
 }
 
 _mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
 {
-    /* This group object now owns the GP core object */
-    group->gp_core = gp_core;
-    group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
-    if (NULL == group->bottom_half_work_gp) {
-        return _MALI_OSK_ERR_FAULT;
-    }
-    return _MALI_OSK_ERR_OK;
+       /* This group object now owns the GP core object */
+       group->gp_core = gp_core;
+       group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+       if (NULL == group->bottom_half_work_gp) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
 }
 
 void mali_group_remove_gp_core(struct mali_group *group)
 {
-    /* This group object no longer owns the GP core object */
-    group->gp_core = NULL;
-    if (NULL != group->bottom_half_work_gp) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
-    }
+       /* This group object no longer owns the GP core object */
+       group->gp_core = NULL;
+       if (NULL != group->bottom_half_work_gp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
 }
 
 _mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
 {
-    /* This group object now owns the PP core object */
-    group->pp_core = pp_core;
-    group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
-    if (NULL == group->bottom_half_work_pp) {
-        return _MALI_OSK_ERR_FAULT;
-    }
-    return _MALI_OSK_ERR_OK;
+       /* This group object now owns the PP core object */
+       group->pp_core = pp_core;
+       group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+       if (NULL == group->bottom_half_work_pp) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
 }
 
 void mali_group_remove_pp_core(struct mali_group *group)
 {
-    /* This group object no longer owns the PP core object */
-    group->pp_core = NULL;
-    if (NULL != group->bottom_half_work_pp) {
-        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
-    }
+       /* This group object no longer owns the PP core object */
+       group->pp_core = NULL;
+       if (NULL != group->bottom_half_work_pp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
 }
 
 enum mali_group_state mali_group_activate(struct mali_group *group)
 {
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
-                mali_group_core_description(group)));
-
-    if (MALI_GROUP_STATE_INACTIVE == group->state) {
-        /* Group is inactive, get PM refs in order to power up */
-
-        /*
-         * We'll take a maximum of 2 power domain references pr group,
-         * one for the group itself, and one for it's L2 cache.
-         */
-        struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
-        struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
-        u32 num_domains = 0;
-        mali_bool all_groups_on;
-
-        /* Deal with child groups first */
-        if (mali_group_is_virtual(group)) {
-            /*
-             * The virtual group might have 0, 1 or 2 L2s in
-             * its l2_cache_core array, but we ignore these and
-             * let the child groups take the needed L2 cache ref
-             * on behalf of the virtual group.
-             * In other words; The L2 refs are taken in pair with
-             * the physical group which the L2 is attached to.
-             */
-            struct mali_group *child;
-            struct mali_group *temp;
-
-            /*
-             * Child group is inactive, get PM
-             * refs in order to power up.
-             */
-            _MALI_OSK_LIST_FOREACHENTRY(child, temp,
-                    &group->group_list,
-                    struct mali_group, group_list) {
-                MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
-                        == child->state);
-
-                child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
-
-                MALI_DEBUG_ASSERT_POINTER(
-                        child->pm_domain);
-                domains[num_domains] = child->pm_domain;
-                groups[num_domains] = child;
-                num_domains++;
-
-                /*
-                 * Take L2 domain ref for child group.
-                 */
-                MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
-                        > num_domains);
-                domains[num_domains] = mali_l2_cache_get_pm_domain(
-                        child->l2_cache_core[0]);
-                groups[num_domains] = NULL;
-                MALI_DEBUG_ASSERT(NULL ==
-                        child->l2_cache_core[1]);
-                num_domains++;
-            }
-        } else {
-            /* Take L2 domain ref for physical groups. */
-            MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
-                    num_domains);
-
-            domains[num_domains] = mali_l2_cache_get_pm_domain(
-                    group->l2_cache_core[0]);
-            groups[num_domains] = NULL;
-            MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
-            num_domains++;
-        }
-
-        /* Do the group itself last (it's dependencies first) */
-
-        group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
-
-        MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
-        domains[num_domains] = group->pm_domain;
-        groups[num_domains] = group;
-        num_domains++;
-
-        all_groups_on = mali_pm_get_domain_refs(domains, groups,
-                num_domains);
-
-        /*
-         * Complete activation for group, include
-         * virtual group or physical group.
-         */
-        if (MALI_TRUE == all_groups_on) {
-
-            mali_group_set_active(group);
-        }
-    } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
-        /* Already active */
-        MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
-    } else {
-        /*
-         * Activation already pending, group->power_is_on could
-         * be both true or false. We need to wait for power up
-         * notification anyway.
-         */
-        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
-                == group->state);
-    }
-
-    MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
-                mali_group_core_description(group),
-                MALI_GROUP_STATE_ACTIVE == group->state ?
-                "ACTIVE" : "PENDING"));
-
-    return group->state;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+                            mali_group_core_description(group)));
+
+       if (MALI_GROUP_STATE_INACTIVE == group->state) {
+               /* Group is inactive, get PM refs in order to power up */
+
+               /*
+                * We'll take a maximum of 2 power domain references pr group,
+                * one for the group itself, and one for it's L2 cache.
+                */
+               struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+               struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+               u32 num_domains = 0;
+               mali_bool all_groups_on;
+
+               /* Deal with child groups first */
+               if (mali_group_is_virtual(group)) {
+                       /*
+                        * The virtual group might have 0, 1 or 2 L2s in
+                        * its l2_cache_core array, but we ignore these and
+                        * let the child groups take the needed L2 cache ref
+                        * on behalf of the virtual group.
+                        * In other words; The L2 refs are taken in pair with
+                        * the physical group which the L2 is attached to.
+                        */
+                       struct mali_group *child;
+                       struct mali_group *temp;
+
+                       /*
+                        * Child group is inactive, get PM
+                        * refs in order to power up.
+                        */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                                                   &group->group_list,
+                                                   struct mali_group, group_list) {
+                               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+                                                 == child->state);
+
+                               child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+                               MALI_DEBUG_ASSERT_POINTER(
+                                       child->pm_domain);
+                               domains[num_domains] = child->pm_domain;
+                               groups[num_domains] = child;
+                               num_domains++;
+
+                               /*
+                                * Take L2 domain ref for child group.
+                                */
+                               MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+                                                 > num_domains);
+                               domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                              child->l2_cache_core[0]);
+                               groups[num_domains] = NULL;
+                               MALI_DEBUG_ASSERT(NULL ==
+                                                 child->l2_cache_core[1]);
+                               num_domains++;
+                       }
+               } else {
+                       /* Take L2 domain ref for physical groups. */
+                       MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                         num_domains);
+
+                       domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                      group->l2_cache_core[0]);
+                       groups[num_domains] = NULL;
+                       MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+                       num_domains++;
+               }
+
+               /* Do the group itself last (it's dependencies first) */
+
+               group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+               MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+               domains[num_domains] = group->pm_domain;
+               groups[num_domains] = group;
+               num_domains++;
+
+               all_groups_on = mali_pm_get_domain_refs(domains, groups,
+                                                       num_domains);
+
+               /*
+                * Complete activation for group, include
+                * virtual group or physical group.
+                */
+               if (MALI_TRUE == all_groups_on) {
+
+                       mali_group_set_active(group);
+               }
+       } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+               /* Already active */
+               MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       } else {
+               /*
+                * Activation already pending, group->power_is_on could
+                * be both true or false. We need to wait for power up
+                * notification anyway.
+                */
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+                                 == group->state);
+       }
+
+       MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+                            mali_group_core_description(group),
+                            MALI_GROUP_STATE_ACTIVE == group->state ?
+                            "ACTIVE" : "PENDING"));
+
+       return group->state;
 }
 
 mali_bool mali_group_set_active(struct mali_group *group)
 {
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
-    MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
 
-    MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
-                mali_group_core_description(group)));
+       MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+                            mali_group_core_description(group)));
 
-    if (mali_group_is_virtual(group)) {
-        struct mali_group *child;
-        struct mali_group *temp;
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
 
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
-                struct mali_group, group_list) {
-            if (MALI_TRUE != child->power_is_on) {
-                return MALI_FALSE;
-            }
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+                                           struct mali_group, group_list) {
+                       if (MALI_TRUE != child->power_is_on) {
+                               return MALI_FALSE;
+                       }
 
-            child->state = MALI_GROUP_STATE_ACTIVE;
-        }
+                       child->state = MALI_GROUP_STATE_ACTIVE;
+               }
 
-        mali_group_reset(group);
-    }
+               mali_group_reset(group);
+       }
 
-    /* Go to ACTIVE state */
-    group->state = MALI_GROUP_STATE_ACTIVE;
+       /* Go to ACTIVE state */
+       group->state = MALI_GROUP_STATE_ACTIVE;
 
-    return MALI_TRUE;
+       return MALI_TRUE;
 }
 
 mali_bool mali_group_deactivate(struct mali_group *group)
 {
-    struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
-    u32 num_domains = 0;
-    mali_bool power_down = MALI_FALSE;
-
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
-
-    MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
-                mali_group_core_description(group)));
-
-    group->state = MALI_GROUP_STATE_INACTIVE;
-
-    MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
-    domains[num_domains] = group->pm_domain;
-    num_domains++;
-
-    if (mali_group_is_virtual(group)) {
-        /* Release refs for all child groups */
-        struct mali_group *child;
-        struct mali_group *temp;
-
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp,
-                &group->group_list,
-                struct mali_group, group_list) {
-            child->state = MALI_GROUP_STATE_INACTIVE;
-
-            MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
-            domains[num_domains] = child->pm_domain;
-            num_domains++;
-
-            /* Release L2 cache domain for child groups */
-            MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
-                    num_domains);
-            domains[num_domains] = mali_l2_cache_get_pm_domain(
-                    child->l2_cache_core[0]);
-            MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
-            num_domains++;
-        }
-
-        /*
-         * Must do mali_group_power_down() steps right here for
-         * virtual group, because virtual group itself is likely to
-         * stay powered on, however child groups are now very likely
-         * to be powered off (and thus lose their state).
-         */
-
-        mali_group_clear_session(group);
-        /*
-         * Disable the broadcast unit (clear it's mask).
-         * This is needed in case the GPU isn't actually
-         * powered down at this point and groups are
-         * removed from an inactive virtual group.
-         * If not, then the broadcast unit will intercept
-         * their interrupts!
-         */
-        mali_bcast_disable(group->bcast_core);
-    } else {
-        /* Release L2 cache domain for physical groups */
-        MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
-                num_domains);
-        domains[num_domains] = mali_l2_cache_get_pm_domain(
-                group->l2_cache_core[0]);
-        MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
-        num_domains++;
-    }
-
-    power_down = mali_pm_put_domain_refs(domains, num_domains);
-
-    return power_down;
+       struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+       u32 num_domains = 0;
+       mali_bool power_down = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+       MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+                            mali_group_core_description(group)));
+
+       group->state = MALI_GROUP_STATE_INACTIVE;
+
+       MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+       domains[num_domains] = group->pm_domain;
+       num_domains++;
+
+       if (mali_group_is_virtual(group)) {
+               /* Release refs for all child groups */
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                                           &group->group_list,
+                                           struct mali_group, group_list) {
+                       child->state = MALI_GROUP_STATE_INACTIVE;
+
+                       MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+                       domains[num_domains] = child->pm_domain;
+                       num_domains++;
+
+                       /* Release L2 cache domain for child groups */
+                       MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                         num_domains);
+                       domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                      child->l2_cache_core[0]);
+                       MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+                       num_domains++;
+               }
+
+               /*
+                * Must do mali_group_power_down() steps right here for
+                * virtual group, because virtual group itself is likely to
+                * stay powered on, however child groups are now very likely
+                * to be powered off (and thus lose their state).
+                */
+
+               mali_group_clear_session(group);
+               /*
+                * Disable the broadcast unit (clear it's mask).
+                * This is needed in case the GPU isn't actually
+                * powered down at this point and groups are
+                * removed from an inactive virtual group.
+                * If not, then the broadcast unit will intercept
+                * their interrupts!
+                */
+               mali_bcast_disable(group->bcast_core);
+       } else {
+               /* Release L2 cache domain for physical groups */
+               MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                 num_domains);
+               domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                              group->l2_cache_core[0]);
+               MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+               num_domains++;
+       }
+
+       power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+       return power_down;
 }
 
 void mali_group_power_up(struct mali_group *group)
 {
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
-                mali_group_core_description(group)));
-
-    group->power_is_on = MALI_TRUE;
-
-    if (MALI_FALSE == mali_group_is_virtual(group)
-            && MALI_FALSE == mali_group_is_in_virtual(group)) {
-        mali_group_reset(group);
-    }
-
-    /*
-     * When we just acquire only one physical group form virt group,
-     * we should remove the bcast&dlbu mask from virt group and
-     * reset bcast and dlbu core, although part of pp cores in virt
-     * group maybe not be powered on.
-     */
-    if (MALI_TRUE == mali_group_is_virtual(group)) {
-        mali_bcast_reset(group->bcast_core);
-        mali_dlbu_update_mask(group->dlbu_core);
-    }
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+                            mali_group_core_description(group)));
+
+       group->power_is_on = MALI_TRUE;
+
+       if (MALI_FALSE == mali_group_is_virtual(group)
+           && MALI_FALSE == mali_group_is_in_virtual(group)) {
+               mali_group_reset(group);
+       }
+
+       /*
+        * When we just acquire only one physical group form virt group,
+        * we should remove the bcast&dlbu mask from virt group and
+        * reset bcast and dlbu core, although part of pp cores in virt
+        * group maybe not be powered on.
+        */
+       if (MALI_TRUE == mali_group_is_virtual(group)) {
+               mali_bcast_reset(group->bcast_core);
+               mali_dlbu_update_mask(group->dlbu_core);
+       }
 }
 
 void mali_group_power_down(struct mali_group *group)
 {
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
-                mali_group_core_description(group)));
-
-    group->power_is_on = MALI_FALSE;
-
-    if (mali_group_is_virtual(group)) {
-        /*
-         * What we do for physical jobs in this function should
-         * already have been done in mali_group_deactivate()
-         * for virtual group.
-         */
-        MALI_DEBUG_ASSERT(NULL == group->session);
-    } else {
-        mali_group_clear_session(group);
-    }
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+                            mali_group_core_description(group)));
+
+       group->power_is_on = MALI_FALSE;
+
+       if (mali_group_is_virtual(group)) {
+               /*
+                * What we do for physical jobs in this function should
+                * already have been done in mali_group_deactivate()
+                * for virtual group.
+                */
+               MALI_DEBUG_ASSERT(NULL == group->session);
+       } else {
+               mali_group_clear_session(group);
+       }
 }
 
 MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
-        {
-        u32 i;
-        struct mali_group *group;
-        struct mali_group *temp;
-
-        MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
-                mali_group_core_description(vgroup),
-                vgroup));
-        MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
-        MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
-
-        i = 0;
-        _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
-        MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
-                i, mali_group_core_description(group),
-                group, group->l2_cache_core[0]));
-        i++;
-        }
-        })
+{
+       u32 i;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+                            mali_group_core_description(vgroup),
+                            vgroup));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+       i = 0;
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+               MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+                                    i, mali_group_core_description(group),
+                                    group, group->l2_cache_core[0]));
+               i++;
+       }
+})
 
 static void mali_group_dump_core_status(struct mali_group *group)
 {
@@ -597,125 +598,125 @@ void mali_group_dump_status(struct mali_group *group)
  */
 void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
 {
-    mali_bool found;
-    u32 i;
-
-    MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
-                mali_group_core_description(child),
-                mali_group_core_description(parent)));
-
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
-    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
-    MALI_DEBUG_ASSERT(NULL == child->parent_group);
-
-    _mali_osk_list_addtail(&child->group_list, &parent->group_list);
-
-    child->parent_group = parent;
-
-    MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
-
-    MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
-    MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
-
-    /* Keep track of the L2 cache cores of child groups */
-    found = MALI_FALSE;
-    for (i = 0; i < 2; i++) {
-        if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
-            MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
-            parent->l2_cache_core_ref_count[i]++;
-            found = MALI_TRUE;
-        }
-    }
-
-    if (!found) {
-        /* First time we see this L2 cache, add it to our list */
-        i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
-
-        MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
-
-        MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
-
-        parent->l2_cache_core[i] = child->l2_cache_core[0];
-        parent->l2_cache_core_ref_count[i]++;
-    }
-
-    /* Update Broadcast Unit and DLBU */
-    mali_bcast_add_group(parent->bcast_core, child);
-    mali_dlbu_add_group(parent->dlbu_core, child);
-
-    if (MALI_TRUE == parent->power_is_on) {
-        mali_bcast_reset(parent->bcast_core);
-        mali_dlbu_update_mask(parent->dlbu_core);
-    }
-
-    if (MALI_TRUE == child->power_is_on) {
-        if (NULL == parent->session) {
-            if (NULL != child->session) {
-                /*
-                 * Parent has no session, so clear
-                 * child session as well.
-                 */
-                mali_mmu_activate_empty_page_directory(child->mmu);
-            }
-        } else {
-            if (parent->session == child->session) {
-                /* We already have same session as parent,
-                 * so a simple zap should be enough.
-                 */
-                mali_mmu_zap_tlb(child->mmu);
-            } else {
-                /*
-                 * Parent has a different session, so we must
-                 * switch to that sessions page table
-                 */
-                mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
-            }
-
-            /* It is the parent which keeps the session from now on */
-            child->session = NULL;
-        }
-    } else {
-        /* should have been cleared when child was powered down */
-        MALI_DEBUG_ASSERT(NULL == child->session);
-    }
-
-    /* Start job on child when parent is active */
-    if (NULL != parent->pp_running_job) {
-        struct mali_pp_job *job = parent->pp_running_job;
-
-        MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
-                    child, mali_pp_job_get_id(job), parent));
-
-        /* Only allowed to add active child to an active parent */
-        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
-        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
-
-        mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
-
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
-                MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
-
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
-                MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+       mali_bool found;
+       u32 i;
+
+       MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+                            mali_group_core_description(child),
+                            mali_group_core_description(parent)));
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(NULL == child->parent_group);
+
+       _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+       child->parent_group = parent;
+
+       MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+       MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+       MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+       /* Keep track of the L2 cache cores of child groups */
+       found = MALI_FALSE;
+       for (i = 0; i < 2; i++) {
+               if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+                       MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+                       parent->l2_cache_core_ref_count[i]++;
+                       found = MALI_TRUE;
+               }
+       }
+
+       if (!found) {
+               /* First time we see this L2 cache, add it to our list */
+               i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+               MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+               MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+               parent->l2_cache_core[i] = child->l2_cache_core[0];
+               parent->l2_cache_core_ref_count[i]++;
+       }
+
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_add_group(parent->bcast_core, child);
+       mali_dlbu_add_group(parent->dlbu_core, child);
+
+       if (MALI_TRUE == parent->power_is_on) {
+               mali_bcast_reset(parent->bcast_core);
+               mali_dlbu_update_mask(parent->dlbu_core);
+       }
+
+       if (MALI_TRUE == child->power_is_on) {
+               if (NULL == parent->session) {
+                       if (NULL != child->session) {
+                               /*
+                                * Parent has no session, so clear
+                                * child session as well.
+                                */
+                               mali_mmu_activate_empty_page_directory(child->mmu);
+                       }
+               } else {
+                       if (parent->session == child->session) {
+                               /* We already have same session as parent,
+                                * so a simple zap should be enough.
+                                */
+                               mali_mmu_zap_tlb(child->mmu);
+                       } else {
+                               /*
+                                * Parent has a different session, so we must
+                                * switch to that sessions page table
+                                */
+                               mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+                       }
+
+                       /* It is the parent which keeps the session from now on */
+                       child->session = NULL;
+               }
+       } else {
+               /* should have been cleared when child was powered down */
+               MALI_DEBUG_ASSERT(NULL == child->session);
+       }
+
+       /* Start job on child when parent is active */
+       if (NULL != parent->pp_running_job) {
+               struct mali_pp_job *job = parent->pp_running_job;
+
+               MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+                                    child, mali_pp_job_get_id(job), parent));
+
+               /* Only allowed to add active child to an active parent */
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+               mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-        trace_gpu_sched_switch(
-                mali_pp_core_description(group->pp_core),
-                sched_clock(), mali_pp_job_get_tid(job),
-                0, mali_pp_job_get_id(job));
+               trace_gpu_sched_switch(
+                       mali_pp_core_description(child->pp_core),
+                       sched_clock(), mali_pp_job_get_tid(job),
+                       0, mali_pp_job_get_id(job));
 #endif
 
 #if defined(CONFIG_MALI400_PROFILING)
-        trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
-                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+               trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
 #endif
-    }
+       }
 
-    MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
 }
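
The slot/refcount bookkeeping at the top of this hunk is easy to misread: a
virtual group tracks at most two distinct L2 cache cores, and each slot counts
how many child groups currently sit behind that cache. Below is a minimal
userspace sketch of the add/remove pairing, with hypothetical types (l2_slots
is invented here, not the driver's API):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the parent group's l2_cache_core[] / l2_cache_core_ref_count[]
 * pair: at most two distinct caches, each reference-counted. */
struct l2_slots {
	void *core[2];
	int ref[2];
};

/* Add path: bump the slot that already holds this cache, otherwise claim
 * the first empty slot (asserting one exists, as the driver does). */
static void l2_slots_add(struct l2_slots *s, void *core)
{
	int i, found = 0;

	for (i = 0; i < 2; i++) {
		if (s->core[i] == core) {
			assert(s->ref[i] > 0);
			s->ref[i]++;
			found = 1;
		}
	}

	if (!found) {
		i = (NULL == s->core[0]) ? 0 : 1;
		assert(NULL == s->core[i]);
		s->core[i] = core;
		s->ref[i]++;
	}
}

/* Remove path: drop the count and release the slot when it hits zero. */
static void l2_slots_remove(struct l2_slots *s, void *core)
{
	int i = (core == s->core[0]) ? 0 : 1;

	assert(core == s->core[i]);
	if (0 == --s->ref[i])
		s->core[i] = NULL;
}

int main(void)
{
	struct l2_slots s = { { NULL, NULL }, { 0, 0 } };
	int a, b;

	l2_slots_add(&s, &a);
	l2_slots_add(&s, &a); /* second child behind the same L2 */
	l2_slots_add(&s, &b); /* a second distinct L2 takes slot 1 */
	l2_slots_remove(&s, &a);
	l2_slots_remove(&s, &a);
	printf("slot0=%p slot1=%p\n", s.core[0], s.core[1]);
	return 0;
}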
 
 /**
@@ -723,113 +724,112 @@ void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
  */
 void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
 {
-    u32 i;
+       u32 i;
 
-    MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
-                mali_group_core_description(child),
-                mali_group_core_description(parent)));
+       MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+                            mali_group_core_description(child),
+                            mali_group_core_description(parent)));
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
-    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
-    MALI_DEBUG_ASSERT(parent == child->parent_group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(parent == child->parent_group);
 
-    /* Update Broadcast Unit and DLBU */
-    mali_bcast_remove_group(parent->bcast_core, child);
-    mali_dlbu_remove_group(parent->dlbu_core, child);
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_remove_group(parent->bcast_core, child);
+       mali_dlbu_remove_group(parent->dlbu_core, child);
 
-    if (MALI_TRUE == parent->power_is_on) {
-        mali_bcast_reset(parent->bcast_core);
-        mali_dlbu_update_mask(parent->dlbu_core);
-    }
+       if (MALI_TRUE == parent->power_is_on) {
+               mali_bcast_reset(parent->bcast_core);
+               mali_dlbu_update_mask(parent->dlbu_core);
+       }
 
-    child->session = parent->session;
-    child->parent_group = NULL;
+       child->session = parent->session;
+       child->parent_group = NULL;
 
-    _mali_osk_list_delinit(&child->group_list);
-    if (_mali_osk_list_empty(&parent->group_list)) {
-        parent->session = NULL;
-    }
+       _mali_osk_list_delinit(&child->group_list);
+       if (_mali_osk_list_empty(&parent->group_list)) {
+               parent->session = NULL;
+       }
 
-    /* Keep track of the L2 cache cores of child groups */
-    i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+       /* Keep track of the L2 cache cores of child groups */
+       i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
 
-    MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+       MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
 
-    parent->l2_cache_core_ref_count[i]--;
-    if (parent->l2_cache_core_ref_count[i] == 0) {
-        parent->l2_cache_core[i] = NULL;
-    }
+       parent->l2_cache_core_ref_count[i]--;
+       if (parent->l2_cache_core_ref_count[i] == 0) {
+               parent->l2_cache_core[i] = NULL;
+       }
 
-    MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent));
 }
 
 struct mali_group *mali_group_acquire_group(struct mali_group *parent)
 {
-    struct mali_group *child = NULL;
+       struct mali_group *child = NULL;
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
 
-    if (!_mali_osk_list_empty(&parent->group_list)) {
-        child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
-        mali_group_remove_group(parent, child);
-    }
+       if (!_mali_osk_list_empty(&parent->group_list)) {
+               child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+               mali_group_remove_group(parent, child);
+       }
 
-    if (NULL != child) {
-        if (MALI_GROUP_STATE_ACTIVE != parent->state
-                && MALI_TRUE == child->power_is_on) {
-            mali_group_reset(child);
-        }
-    }
+       if (NULL != child) {
+               if (MALI_GROUP_STATE_ACTIVE != parent->state
+                   && MALI_TRUE == child->power_is_on) {
+                       mali_group_reset(child);
+               }
+       }
 
-    return child;
+       return child;
 }
 
 void mali_group_reset(struct mali_group *group)
 {
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
-    MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
-    MALI_DEBUG_ASSERT(NULL == group->session);
-
-    MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
-                mali_group_core_description(group)));
-
-    if (NULL != group->dlbu_core) {
-        mali_dlbu_reset(group->dlbu_core);
-    }
-
-    if (NULL != group->bcast_core) {
-        mali_bcast_reset(group->bcast_core);
-    }
-
-    MALI_DEBUG_ASSERT(NULL != group->mmu);
-    mali_group_reset_mmu(group);
-
-    if (NULL != group->gp_core) {
-        MALI_DEBUG_ASSERT(NULL == group->pp_core);
-        mali_gp_reset(group->gp_core);
-    } else {
-        MALI_DEBUG_ASSERT(NULL != group->pp_core);
-        mali_group_reset_pp(group);
-    }
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+       MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+
+       MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+                            mali_group_core_description(group)));
+
+       if (NULL != group->dlbu_core) {
+               mali_dlbu_reset(group->dlbu_core);
+       }
+
+       if (NULL != group->bcast_core) {
+               mali_bcast_reset(group->bcast_core);
+       }
+
+       MALI_DEBUG_ASSERT(NULL != group->mmu);
+       mali_group_reset_mmu(group);
+
+       if (NULL != group->gp_core) {
+               MALI_DEBUG_ASSERT(NULL == group->pp_core);
+               mali_gp_reset(group->gp_core);
+       } else {
+               MALI_DEBUG_ASSERT(NULL != group->pp_core);
+               mali_group_reset_pp(group);
+       }
 }
 
 void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled)
 {
-    struct mali_session_data *session;
+       struct mali_session_data *session;
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-    MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
-                job,
-                mali_group_core_description(group)));
+       MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+                            job,
+                            mali_group_core_description(group)));
 
-    session = mali_gp_job_get_session(job);
+       session = mali_gp_job_get_session(job);
 
-    MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
-    mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+       MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+       mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
 
        /* Reset GPU and disable gpu secure mode if needed. */
        if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
@@ -849,45 +849,45 @@ void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job,
                mali_group_activate_page_directory(group, session, MALI_FALSE);
        }
 
-    mali_gp_job_start(group->gp_core, job);
+       mali_gp_job_start(group->gp_core, job);
 
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
-            MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-            mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-            mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                     mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
 
 #if defined(CONFIG_MALI400_PROFILING)
-    trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */,  0 /* core */,
-            mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+       trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */,  0 /* core */,
+                              mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
 #endif
 
 #if defined(CONFIG_MALI400_PROFILING)
-    if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-            (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-        mali_group_report_l2_cache_counters_per_core(group, 0);
-    }
+       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+               mali_group_report_l2_cache_counters_per_core(group, 0);
+       }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-    trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
-            sched_clock(), mali_gp_job_get_tid(job),
-            0, mali_gp_job_get_id(job));
+       trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+                              sched_clock(), mali_gp_job_get_tid(job),
+                              0, mali_gp_job_get_id(job));
 #endif
 
-    group->gp_running_job = job;
-    group->is_working = MALI_TRUE;
+       group->gp_running_job = job;
+       group->is_working = MALI_TRUE;
 
-    /* Setup SW timer and record start time */
-    group->start_time = _mali_osk_time_tickcount();
-    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+       /* Setup SW timer and record start time */
+       group->start_time = _mali_osk_time_tickcount();
+       _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
 
-    MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
-                job,
-                mali_group_core_description(group),
-                group->start_time));
+       MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+                            job,
+                            mali_group_core_description(group),
+                            group->start_time));
 }
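
The last few lines of mali_group_start_gp_job() arm a software watchdog: the
start tick is recorded and a one-shot timer is set to fire after
mali_max_job_runtime milliseconds; the completion path deletes it again. A
compilable sketch of that arm/cancel pattern follows, with invented names
(watchdog, ms_to_ticks at an assumed 100 Hz) rather than the OSK timer API:

#include <stdio.h>

/* Invented stand-ins; the driver uses _mali_osk_timer_mod()/_del_async()
 * and _mali_osk_time_tickcount()/_mstoticks(). */
static unsigned long now_ticks;

static unsigned long ms_to_ticks(unsigned long ms)
{
	return ms / 10; /* assumes a 100 Hz tick for the sketch */
}

struct watchdog {
	unsigned long start_time; /* tick when the job was started */
	unsigned long expires;    /* tick when the timeout would fire */
	int pending;
};

static void watchdog_arm(struct watchdog *w, unsigned long timeout_ms)
{
	w->start_time = now_ticks;
	w->expires = now_ticks + ms_to_ticks(timeout_ms);
	w->pending = 1;
}

static void watchdog_cancel(struct watchdog *w)
{
	w->pending = 0;
}

int main(void)
{
	struct watchdog w = { 0, 0, 0 };

	watchdog_arm(&w, 4000); /* job start: record time, arm timeout */
	now_ticks += 100;       /* job runs for a while... */
	watchdog_cancel(&w);    /* completion path disarms the timer */
	printf("start=%lu expires=%lu pending=%d\n",
	       w.start_time, w.expires, w.pending);
	return 0;
}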
 
 /* Used to set all the registers except frame renderer list address and fragment shader stack address
@@ -895,24 +895,24 @@ void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job,
  */
 void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled)
 {
-    struct mali_session_data *session;
+       struct mali_session_data *session;
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-    MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
-                job, sub_job + 1,
-                mali_pp_job_get_sub_job_count(job),
-                mali_group_core_description(group)));
+       MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            mali_group_core_description(group)));
 
-    session = mali_pp_job_get_session(job);
+       session = mali_pp_job_get_session(job);
 
-    if (NULL != group->l2_cache_core[0]) {
-        mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
-    }
+       if (NULL != group->l2_cache_core[0]) {
+               mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+       }
 
-    if (NULL != group->l2_cache_core[1]) {
-        mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
-    }
+       if (NULL != group->l2_cache_core[1]) {
+               mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+       }
 
        /* Reset GPU and change gpu secure mode if needed. */
        if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
@@ -940,376 +940,376 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                mali_group_activate_page_directory(group, session, MALI_FALSE);
        }
 
-    if (mali_group_is_virtual(group)) {
-        struct mali_group *child;
-        struct mali_group *temp;
-        u32 core_num = 0;
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
+               u32 core_num = 0;
 
-        MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
 
-        /* Configure DLBU for the job */
-        mali_dlbu_config_job(group->dlbu_core, job);
+               /* Configure DLBU for the job */
+               mali_dlbu_config_job(group->dlbu_core, job);
 
-        /* Write stack address for each child group */
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-            mali_pp_write_addr_stack(child->pp_core, job);
-            core_num++;
-        }
+               /* Write stack address for each child group */
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       mali_pp_write_addr_stack(child->pp_core, job);
+                       core_num++;
+               }
 
-        mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
-    } else {
-        mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
-    }
+               mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+       } else {
+               mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+       }
 
-    /* if the group is virtual, loop through physical groups which belong to this group
-     * and call profiling events for its cores as virtual */
-    if (MALI_TRUE == mali_group_is_virtual(group)) {
-        struct mali_group *child;
-        struct mali_group *temp;
+       /* If the group is virtual, loop through the physical groups that belong
+        * to it and emit profiling events for their cores as virtual. */
+       if (MALI_TRUE == mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
 
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
-                    MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                    mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                                     mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
 
-            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
-                    MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                    mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                     mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
 
 #if defined(CONFIG_MALI400_PROFILING)
-            trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
-                    mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+                       trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                              mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
 #endif
-        }
+               }
 
 #if defined(CONFIG_MALI400_PROFILING)
-        if (0 != group->l2_cache_core_ref_count[0]) {
-            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-            }
-        }
-        if (0 != group->l2_cache_core_ref_count[1]) {
-            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
-                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
-                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
-            }
-        }
+               if (0 != group->l2_cache_core_ref_count[0]) {
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
+               }
+               if (0 != group->l2_cache_core_ref_count[1]) {
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                       }
+               }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
 
-    } else { /* group is physical - call profiling events for physical cores */
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
-                MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+       } else { /* group is physical - emit profiling events for the physical core */
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
 
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
-                MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
-                mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
 
 #if defined(CONFIG_MALI400_PROFILING)
-        trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
-                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+               trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
 #endif
 
 #if defined(CONFIG_MALI400_PROFILING)
-        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-            mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-        }
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+               }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
-    }
+       }
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-    trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
-            sched_clock(), mali_pp_job_get_tid(job),
-            0, mali_pp_job_get_id(job));
+       trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+                              sched_clock(), mali_pp_job_get_tid(job),
+                              0, mali_pp_job_get_id(job));
 #endif
 
-    group->pp_running_job = job;
-    group->pp_running_sub_job = sub_job;
-    group->is_working = MALI_TRUE;
+       group->pp_running_job = job;
+       group->pp_running_sub_job = sub_job;
+       group->is_working = MALI_TRUE;
 
-    /* Setup SW timer and record start time */
-    group->start_time = _mali_osk_time_tickcount();
-    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+       /* Setup SW timer and record start time */
+       group->start_time = _mali_osk_time_tickcount();
+       _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
 
-    MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
-                job, sub_job + 1,
-                mali_pp_job_get_sub_job_count(job),
-                mali_group_core_description(group),
-                group->start_time));
+       MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            mali_group_core_description(group),
+                            group->start_time));
 
 }
 
 void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
 {
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-    MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
-    mali_l2_cache_invalidate(group->l2_cache_core[0]);
+       MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+       mali_l2_cache_invalidate(group->l2_cache_core[0]);
 
-    mali_mmu_zap_tlb_without_stall(group->mmu);
+       mali_mmu_zap_tlb_without_stall(group->mmu);
 
-    mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+       mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
 
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
-            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-            0, 0, 0, 0, 0);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     0, 0, 0, 0, 0);
 
 #if defined(CONFIG_MALI400_PROFILING)
-    trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */,  0 /* core */,
-            mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+       trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */,  0 /* core */,
+                              mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
 #endif
 }
 
 static void mali_group_reset_mmu(struct mali_group *group)
 {
-    struct mali_group *child;
-    struct mali_group *temp;
-    _mali_osk_errcode_t err;
-
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    if (!mali_group_is_virtual(group)) {
-        /* This is a physical group or an idle virtual group -- simply wait for
-         * the reset to complete. */
-        err = mali_mmu_reset(group->mmu);
-        MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
-    } else { /* virtual group */
-        /* Loop through all members of this virtual group and wait
-         * until they are done resetting.
-         */
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-            err = mali_mmu_reset(child->mmu);
-            MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
-        }
-    }
+       struct mali_group *child;
+       struct mali_group *temp;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (!mali_group_is_virtual(group)) {
+               /* This is a physical group or an idle virtual group -- simply wait for
+                * the reset to complete. */
+               err = mali_mmu_reset(group->mmu);
+               MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+       } else { /* virtual group */
+               /* Loop through all members of this virtual group and wait
+                * until they are done resetting.
+                */
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       err = mali_mmu_reset(child->mmu);
+                       MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+               }
+       }
 }
 
 static void mali_group_reset_pp(struct mali_group *group)
 {
-    struct mali_group *child;
-    struct mali_group *temp;
-
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    mali_pp_reset_async(group->pp_core);
-
-    if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
-        /* This is a physical group or an idle virtual group -- simply wait for
-         * the reset to complete. */
-        mali_pp_reset_wait(group->pp_core);
-    } else {
-        /* Loop through all members of this virtual group and wait until they
-         * are done resetting.
-         */
-        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-            mali_pp_reset_wait(child->pp_core);
-        }
-    }
+       struct mali_group *child;
+       struct mali_group *temp;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       mali_pp_reset_async(group->pp_core);
+
+       if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+               /* This is a physical group or an idle virtual group -- simply wait for
+                * the reset to complete. */
+               mali_pp_reset_wait(group->pp_core);
+       } else {
+               /* Loop through all members of this virtual group and wait until they
+                * are done resetting.
+                */
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       mali_pp_reset_wait(child->pp_core);
+               }
+       }
 }
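
mali_group_reset_mmu() and mali_group_reset_pp() above share one shape: a
physical group acts on its own core, while a virtual group fans the operation
out over its child list. A distilled, compilable version of that dispatch,
with hypothetical types (the real code walks an _mali_osk_list_t with
_MALI_OSK_LIST_FOREACHENTRY):

#include <stddef.h>
#include <stdio.h>

struct grp {
	const char *name;
	int is_virtual;
	struct grp *children[4]; /* NULL-terminated for the sketch */
};

static void core_reset(struct grp *g)
{
	printf("reset %s\n", g->name);
}

/* Physical group: reset self. Virtual group: fan out over the children. */
static void group_reset_cores(struct grp *g)
{
	if (!g->is_virtual) {
		core_reset(g);
	} else {
		int i;

		for (i = 0; i < 4 && NULL != g->children[i]; i++)
			core_reset(g->children[i]);
	}
}

int main(void)
{
	struct grp pp0 = { "pp0", 0, { NULL } };
	struct grp pp1 = { "pp1", 0, { NULL } };
	struct grp virt = { "virtual", 1, { &pp0, &pp1, NULL } };

	group_reset_cores(&pp0);
	group_reset_cores(&virt);
	return 0;
}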
 
 struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
 {
-    struct mali_pp_job *pp_job_to_return;
+       struct mali_pp_job *pp_job_to_return;
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
-    MALI_DEBUG_ASSERT_POINTER(sub_job);
-    MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+       MALI_DEBUG_ASSERT_POINTER(sub_job);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
 
-    /* Stop/clear the timeout timer. */
-    _mali_osk_timer_del_async(group->timeout_timer);
+       /* Stop/clear the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
 
-    if (NULL != group->pp_running_job) {
+       if (NULL != group->pp_running_job) {
 
-        /* Deal with HW counters and profiling */
+               /* Deal with HW counters and profiling */
 
-        if (MALI_TRUE == mali_group_is_virtual(group)) {
-            struct mali_group *child;
-            struct mali_group *temp;
+               if (MALI_TRUE == mali_group_is_virtual(group)) {
+                       struct mali_group *child;
+                       struct mali_group *temp;
 
-            /* update performance counters from each physical pp core within this virtual group */
-            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
-            }
+                       /* update performance counters from each physical pp core within this virtual group */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+                       }
 
 #if defined(CONFIG_MALI400_PROFILING)
-            /* send profiling data per physical core */
-            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                        MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
-                        MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                        mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
-                        mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
-                        mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
-                        0, 0);
-
-                trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
-                        0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
-                        mali_pp_job_get_frame_builder_id(group->pp_running_job),
-                        mali_pp_job_get_flush_id(group->pp_running_job));
-            }
-            if (0 != group->l2_cache_core_ref_count[0]) {
-                if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                        (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-                    mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-                }
-            }
-            if (0 != group->l2_cache_core_ref_count[1]) {
-                if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
-                        (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
-                    mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
-                }
-            }
+                       /* send profiling data per physical core */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                             mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                             0, 0);
+
+                               trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                                                      0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                                      mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                                                      mali_pp_job_get_flush_id(group->pp_running_job));
+                       }
+                       if (0 != group->l2_cache_core_ref_count[0]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                               }
+                       }
+                       if (0 != group->l2_cache_core_ref_count[1]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                               }
+                       }
 
 #endif
-        } else {
-            /* update performance counters for a physical group's pp core */
-            mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+               } else {
+                       /* update performance counters for a physical group's pp core */
+                       mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
 
 #if defined(CONFIG_MALI400_PROFILING)
-            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
-                    MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
-                    mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
-                    mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
-                    mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
-                    0, 0);
-
-            trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
-                    0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
-                    mali_pp_job_get_frame_builder_id(group->pp_running_job),
-                    mali_pp_job_get_flush_id(group->pp_running_job));
-
-            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-            }
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                                     mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                     0, 0);
+
+                       trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                                              0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                                              mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                                              mali_pp_job_get_flush_id(group->pp_running_job));
+
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
 #endif
-        }
+               }
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-        trace_gpu_sched_switch(
-                mali_gp_core_description(group->gp_core),
-                sched_clock(), 0, 0, 0);
+               trace_gpu_sched_switch(
+                       mali_pp_core_description(group->pp_core),
+                       sched_clock(), 0, 0, 0);
 #endif
 
-    }
-
-    if (success) {
-        /* Only do soft reset for successful jobs, a full recovery
-         * reset will be done for failed jobs. */
-        mali_pp_reset_async(group->pp_core);
-    }
+       }
 
-    pp_job_to_return = group->pp_running_job;
-    group->pp_running_job = NULL;
-    group->is_working = MALI_FALSE;
-    *sub_job = group->pp_running_sub_job;
+       if (success) {
+               /* Only do a soft reset for successful jobs; a full recovery
+                * reset will be done for failed jobs. */
+               mali_pp_reset_async(group->pp_core);
+       }
 
-    if (!success) {
-        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
-        mali_group_recovery_reset(group);
-    } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
-        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
-        mali_group_recovery_reset(group);
-    }
+       pp_job_to_return = group->pp_running_job;
+       group->pp_running_job = NULL;
+       group->is_working = MALI_FALSE;
+       *sub_job = group->pp_running_sub_job;
+
+       if (!success) {
+               MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+               mali_group_recovery_reset(group);
+       } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+               MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+               mali_group_recovery_reset(group);
+       }
 
-    return pp_job_to_return;
+       return pp_job_to_return;
 }
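
The completion policy in mali_group_complete_pp() (and its GP twin below) is
worth calling out: a successful job gets a cheap asynchronous soft reset that
is waited on afterwards, while a failed job, or a soft reset that itself
fails, escalates to a full recovery reset. A small sketch of that decision,
with stubbed helpers (not the driver's API):

#include <stdbool.h>
#include <stdio.h>

enum err { ERR_OK, ERR_FAULT }; /* hypothetical result codes */
struct core { const char *name; };

static void reset_async(struct core *c)
{
	printf("%s: soft reset started\n", c->name);
}

static enum err reset_wait(struct core *c)
{
	printf("%s: soft reset done\n", c->name);
	return ERR_OK;
}

static void recovery_reset(struct core *c)
{
	printf("%s: hard recovery reset\n", c->name);
}

/* Mirrors the policy above: soft reset on success, full recovery reset
 * on job failure or on a soft reset that did not complete cleanly. */
static void complete_job(struct core *c, bool success)
{
	if (success)
		reset_async(c);

	if (!success)
		recovery_reset(c);
	else if (ERR_OK != reset_wait(c))
		recovery_reset(c);
}

int main(void)
{
	struct core pp = { "pp0" };

	complete_job(&pp, true);
	complete_job(&pp, false);
	return 0;
}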
 
 struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
 {
-    struct mali_gp_job *gp_job_to_return;
+       struct mali_gp_job *gp_job_to_return;
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
-    MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
 
-    /* Stop/clear the timeout timer. */
-    _mali_osk_timer_del_async(group->timeout_timer);
+       /* Stop/clear the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
 
-    if (NULL != group->gp_running_job) {
-        mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+       if (NULL != group->gp_running_job) {
+               mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
 
 #if defined(CONFIG_MALI400_PROFILING)
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-                mali_gp_job_get_perf_counter_value0(group->gp_running_job),
-                mali_gp_job_get_perf_counter_value1(group->gp_running_job),
-                mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
-                0, 0);
-
-        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
-            mali_group_report_l2_cache_counters_per_core(group, 0);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+                       mali_group_report_l2_cache_counters_per_core(group, 0);
 #endif
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-        trace_gpu_sched_switch(
-                mali_pp_core_description(group->pp_core),
-                sched_clock(), 0, 0, 0);
+               trace_gpu_sched_switch(
+                       mali_gp_core_description(group->gp_core),
+                       sched_clock(), 0, 0, 0);
 #endif
 
 #if defined(CONFIG_MALI400_PROFILING)
-        trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */,  0 /* core */,
-                mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+               trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */,  0 /* core */,
+                                      mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
 #endif
 
-        mali_gp_job_set_current_heap_addr(group->gp_running_job,
-                mali_gp_read_plbu_alloc_start_addr(group->gp_core));
-    }
-
-    if (success) {
-        /* Only do soft reset for successful jobs, a full recovery
-         * reset will be done for failed jobs. */
-        mali_gp_reset_async(group->gp_core);
-    }
-
-    gp_job_to_return = group->gp_running_job;
-    group->gp_running_job = NULL;
-    group->is_working = MALI_FALSE;
-
-    if (!success) {
-        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
-        mali_group_recovery_reset(group);
-    } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
-        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
-        mali_group_recovery_reset(group);
-    }
-
-    return gp_job_to_return;
+               mali_gp_job_set_current_heap_addr(group->gp_running_job,
+                                                 mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+       }
+
+       if (success) {
+               /* Only do a soft reset for successful jobs; a full recovery
+                * reset will be done for failed jobs. */
+               mali_gp_reset_async(group->gp_core);
+       }
+
+       gp_job_to_return = group->gp_running_job;
+       group->gp_running_job = NULL;
+       group->is_working = MALI_FALSE;
+
+       if (!success) {
+               MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+               mali_group_recovery_reset(group);
+       } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+               MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+               mali_group_recovery_reset(group);
+       }
+
+       return gp_job_to_return;
 }
 
 struct mali_group *mali_group_get_glob_group(u32 index)
 {
-    if (mali_global_num_groups > index) {
-        return mali_global_groups[index];
-    }
+       if (mali_global_num_groups > index) {
+               return mali_global_groups[index];
+       }
 
-    return NULL;
+       return NULL;
 }
 
 u32 mali_group_get_glob_num_groups(void)
 {
-    return mali_global_num_groups;
+       return mali_global_num_groups;
 }
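
mali_group_get_glob_group() keeps the bounds check inside the accessor, so
callers can iterate with mali_group_get_glob_num_groups() and simply test for
NULL. A hypothetical userspace sketch of that pattern (the table and stub
types are invented here; the real ones are mali_global_groups and
mali_global_num_groups):

#include <stddef.h>
#include <stdio.h>

struct group_stub { int id; };

static struct group_stub table[3] = { { 0 }, { 1 }, { 2 } };
static unsigned int num_groups = 3;

/* Out-of-range indices yield NULL instead of reading past the table. */
static struct group_stub *get_glob_group(unsigned int index)
{
	return (num_groups > index) ? &table[index] : NULL;
}

int main(void)
{
	unsigned int i;

	/* Probing one past the end is safe; the caller just sees NULL. */
	for (i = 0; i < num_groups + 1; i++) {
		struct group_stub *g = get_glob_group(i);

		printf("index %u -> %s\n", i, g ? "group" : "NULL");
	}
	return 0;
}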
 
 static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
 {
-    MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
-                mali_session_get_page_directory(session), session,
-                mali_group_core_description(group)));
+       MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+                            mali_session_get_page_directory(session), session,
+                            mali_group_core_description(group)));
 
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
        if (group->session != session || MALI_TRUE == is_reload) {
                /* Different session than last time, so we need to do some work */
@@ -1329,175 +1329,134 @@ static void mali_group_activate_page_directory(struct mali_group *group, struct
 
 static void mali_group_recovery_reset(struct mali_group *group)
 {
-    _mali_osk_errcode_t err;
-
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    /* Stop cores, bus stop */
-    if (NULL != group->pp_core) {
-        mali_pp_stop_bus(group->pp_core);
-    } else {
-        mali_gp_stop_bus(group->gp_core);
-    }
-
-    /* Flush MMU and clear page fault (if any) */
-    mali_mmu_activate_fault_flush_page_directory(group->mmu);
-    mali_mmu_page_fault_done(group->mmu);
-
-    /* Wait for cores to stop bus, then do a hard reset on them */
-    if (NULL != group->pp_core) {
-        if (mali_group_is_virtual(group)) {
-            struct mali_group *child, *temp;
-
-            /* Disable the broadcast unit while we do reset directly on the member cores. */
-            mali_bcast_disable(group->bcast_core);
-
-            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                mali_pp_stop_bus_wait(child->pp_core);
-                mali_pp_hard_reset(child->pp_core);
-            }
-
-            mali_bcast_enable(group->bcast_core);
-        } else {
-            mali_pp_stop_bus_wait(group->pp_core);
-            mali_pp_hard_reset(group->pp_core);
-        }
-    } else {
-        mali_gp_stop_bus_wait(group->gp_core);
-        mali_gp_hard_reset(group->gp_core);
-    }
-
-    /* Reset MMU */
-    err = mali_mmu_reset(group->mmu);
-    MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
-    MALI_IGNORE(err);
-
-    group->session = NULL;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* Stop cores, bus stop */
+       if (NULL != group->pp_core) {
+               mali_pp_stop_bus(group->pp_core);
+       } else {
+               mali_gp_stop_bus(group->gp_core);
+       }
+
+       /* Flush MMU and clear page fault (if any) */
+       mali_mmu_activate_fault_flush_page_directory(group->mmu);
+       mali_mmu_page_fault_done(group->mmu);
+
+       /* Wait for cores to stop bus, then do a hard reset on them */
+       if (NULL != group->pp_core) {
+               if (mali_group_is_virtual(group)) {
+                       struct mali_group *child, *temp;
+
+                       /* Disable the broadcast unit while we do reset directly on the member cores. */
+                       mali_bcast_disable(group->bcast_core);
+
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               mali_pp_stop_bus_wait(child->pp_core);
+                               mali_pp_hard_reset(child->pp_core);
+                       }
+
+                       mali_bcast_enable(group->bcast_core);
+               } else {
+                       mali_pp_stop_bus_wait(group->pp_core);
+                       mali_pp_hard_reset(group->pp_core);
+               }
+       } else {
+               mali_gp_stop_bus_wait(group->gp_core);
+               mali_gp_hard_reset(group->gp_core);
+       }
+
+       /* Reset MMU */
+       err = mali_mmu_reset(group->mmu);
+       MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+       MALI_IGNORE(err);
+
+       group->session = NULL;
 }
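
The ordering in mali_group_recovery_reset() matters: bus traffic is quiesced
first, any outstanding page fault is flushed and acknowledged, the core(s)
are hard-reset only after the bus has stopped, and the MMU reset plus session
drop come last so no stale translation survives. A minimal ordering sketch
with stubbed helpers (not the driver's API):

#include <stdio.h>

static void stop_bus(void)         { puts("1: stop core bus traffic"); }
static void flush_fault(void)      { puts("2: flush MMU, ack page fault"); }
static void hard_reset_cores(void) { puts("3: wait for bus stop, hard reset cores"); }
static void reset_mmu(void)        { puts("4: reset MMU"); }

int main(void)
{
	/* Each step assumes the previous one completed; reordering these
	 * (e.g. resetting before the bus is stopped) risks a wedged core. */
	stop_bus();
	flush_fault();
	hard_reset_cores();
	reset_mmu();
	puts("5: session = NULL (no stale page directory kept)");
	return 0;
}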
 
 #if MALI_STATE_TRACKING
 u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
 {
-    int n = 0;
-    int i;
-    struct mali_group *child;
-    struct mali_group *temp;
-
-    if (mali_group_is_virtual(group)) {
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "Virtual PP Group: %p\n", group);
-    } else if (mali_group_is_in_virtual(group)) {
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "Child PP Group: %p\n", group);
-    } else if (NULL != group->pp_core) {
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "Physical PP Group: %p\n", group);
-    } else {
-        MALI_DEBUG_ASSERT_POINTER(group->gp_core);
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "GP Group: %p\n", group);
-    }
-
-    switch (group->state) {
-        case MALI_GROUP_STATE_INACTIVE:
-            n += _mali_osk_snprintf(buf + n, size - n,
-                    "\tstate: INACTIVE\n");
-            break;
-        case MALI_GROUP_STATE_ACTIVATION_PENDING:
-            n += _mali_osk_snprintf(buf + n, size - n,
-                    "\tstate: ACTIVATION_PENDING\n");
-            break;
-        case MALI_GROUP_STATE_ACTIVE:
-            n += _mali_osk_snprintf(buf + n, size - n,
-                    "\tstate: MALI_GROUP_STATE_ACTIVE\n");
-            break;
-        default:
-            n += _mali_osk_snprintf(buf + n, size - n,
-                    "\tstate: UNKNOWN (%d)\n", group->state);
-            MALI_DEBUG_ASSERT(0);
-            break;
-    }
-
-    n += _mali_osk_snprintf(buf + n, size - n,
-            "\tSW power: %s\n",
-            group->power_is_on ? "On" : "Off");
-
-    n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
-
-    for (i = 0; i < 2; i++) {
-        if (NULL != group->l2_cache_core[i]) {
-            struct mali_pm_domain *domain;
-            domain = mali_l2_cache_get_pm_domain(
-                    group->l2_cache_core[i]);
-            n += mali_pm_dump_state_domain(domain,
-                    buf + n, size - n);
-        }
-    }
-
-    if (group->gp_core) {
-        n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "\tGP running job: %p\n", group->gp_running_job);
-    }
-
-    if (group->pp_core) {
-        n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
-        n += _mali_osk_snprintf(buf + n, size - n,
-                "\tPP running job: %p, subjob %d \n",
-                group->pp_running_job,
-                group->pp_running_sub_job);
-    }
-
-    _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
-            struct mali_group, group_list) {
-        n += mali_group_dump_state(child, buf + n, size - n);
-    }
-
-    return n;
-}
-#endif
+       int n = 0;
+       int i;
+
+       if (mali_group_is_virtual(group)) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Virtual PP Group: %p\n", group);
+       } else if (mali_group_is_in_virtual(group)) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Child PP Group: %p\n", group);
+       } else if (NULL != group->pp_core) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Physical PP Group: %p\n", group);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP Group: %p\n", group);
+       }
+
+       switch (group->state) {
+       case MALI_GROUP_STATE_INACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: INACTIVE\n");
+               break;
+       case MALI_GROUP_STATE_ACTIVATION_PENDING:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: ACTIVATION_PENDING\n");
+               break;
+       case MALI_GROUP_STATE_ACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: MALI_GROUP_STATE_ACTIVE\n");
+               break;
+       default:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: UNKNOWN (%d)\n", group->state);
+               MALI_DEBUG_ASSERT(0);
+               break;
+       }
 
-/* Kasin added. */
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-#include <platform/meson_m400/mali_fix.h>
-#define INT_MALI_PP2_MMU ( 6+32)
-struct _mali_osk_irq_t_struct;
-u32 get_irqnum(struct _mali_osk_irq_t_struct* irq);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tSW power: %s\n",
+                               group->power_is_on ? "On" : "Off");
+
+       n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
+
+       for (i = 0; i < 2; i++) {
+               if (NULL != group->l2_cache_core[i]) {
+                       struct mali_pm_domain *domain;
+                       domain = mali_l2_cache_get_pm_domain(
+                                        group->l2_cache_core[i]);
+                       n += mali_pm_dump_state_domain(domain,
+                                                      buf + n, size - n);
+               }
+       }
+
+       if (group->gp_core) {
+               n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tGP running job: %p\n", group->gp_running_job);
+       }
+
+       if (group->pp_core) {
+               n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tPP running job: %p, subjob %d\n",
+                                       group->pp_running_job,
+                                       group->pp_running_sub_job);
+       }
+
+       return n;
+}
 #endif
 
 _mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    struct mali_mmu_core *mmu = group->mmu;
-#endif
-    _mali_osk_errcode_t ret;
-
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    if (MALI_FALSE == group->power_is_on)
-        MALI_SUCCESS;
-    if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU)
-    {
-        if (group == NULL || group->pp_core == NULL)
-            MALI_SUCCESS;
-        if (group->pp_core->core_id == 0) {
-            if (malifix_get_mmu_int_process_state(0) == MMU_INT_HIT)
-                malifix_set_mmu_int_process_state(0, MMU_INT_TOP);
-            else
-                MALI_SUCCESS;
-        }
-        else if (group->pp_core->core_id == 1) {
-            if (malifix_get_mmu_int_process_state(1) == MMU_INT_HIT)
-                malifix_set_mmu_int_process_state(1, MMU_INT_TOP);
-            else
-                MALI_SUCCESS;
-        } else
-            MALI_SUCCESS;
-    }
-#endif
+       struct mali_group *group = (struct mali_group *)data;
+       _mali_osk_errcode_t ret;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
@@ -1508,29 +1467,29 @@ _mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
                return _MALI_OSK_ERR_FAULT;
        }
 #endif
-    if (NULL != group->gp_core) {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                0, 0, /* No pid and tid for interrupt handler */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    } else {
-        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                0, 0, /* No pid and tid for interrupt handler */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
-                    mali_pp_core_get_id(group->pp_core)),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    }
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       }
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
 
-    ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
+       ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
 
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
@@ -1559,99 +1518,83 @@ _mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
        }
 #endif
 
-    if (NULL != group->gp_core) {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                0, 0, /* No pid and tid for interrupt handler */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    } else {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                0, 0, /* No pid and tid for interrupt handler */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
-                    mali_pp_core_get_id(group->pp_core)),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    }
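+       /* The stop events below no longer re-read the rawstat register after
+        * the interrupt has been handled; 0xFFFFFFFF marks the value as not
+        * sampled. */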
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             0xFFFFFFFF, 0);
+       } else {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             0xFFFFFFFF, 0);
+       }
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
 
-    return ret;
+       return ret;
 }
 
 static void mali_group_bottom_half_mmu(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    struct mali_mmu_core *mmu = group->mmu;
-#endif
+       struct mali_group *group = (struct mali_group *)data;
 
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
-    if (NULL != group->gp_core) {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                0, _mali_osk_get_tid(), /* pid and tid */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    } else {
-        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                0, _mali_osk_get_tid(), /* pid and tid */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
-                    mali_pp_core_get_id(group->pp_core)),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    }
-
-    mali_executor_interrupt_mmu(group, MALI_FALSE);
-
-    if (NULL != group->gp_core) {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                0, _mali_osk_get_tid(), /* pid and tid */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    } else {
-        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                0, _mali_osk_get_tid(), /* pid and tid */
-                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
-                    mali_pp_core_get_id(group->pp_core)),
-                mali_mmu_get_rawstat(group->mmu), 0);
-    }
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU)
-    {
-        if (group->pp_core->core_id == 0) {
-            if (malifix_get_mmu_int_process_state(0) == MMU_INT_TOP)
-                malifix_set_mmu_int_process_state(0, MMU_INT_NONE);
-        }
-        else if (group->pp_core->core_id == 1) {
-            if (malifix_get_mmu_int_process_state(1) == MMU_INT_TOP)
-                malifix_set_mmu_int_process_state(1, MMU_INT_NONE);
-        }
-    }
-#endif
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             0xFFFFFFFF, 0);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             0xFFFFFFFF, 0);
+       }
+
+       mali_executor_interrupt_mmu(group, MALI_FALSE);
+
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             0xFFFFFFFF, 0);
+       } else {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             0xFFFFFFFF, 0);
+       }
 }
 
 _mali_osk_errcode_t mali_group_upper_half_gp(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-    _mali_osk_errcode_t ret;
+       struct mali_group *group = (struct mali_group *)data;
+       _mali_osk_errcode_t ret;
 
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
@@ -1662,21 +1605,21 @@ _mali_osk_errcode_t mali_group_upper_half_gp(void *data)
                return _MALI_OSK_ERR_FAULT;
        }
 #endif
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-            0, 0, /* No pid and tid for interrupt handler */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
-            mali_gp_get_rawstat(group->gp_core), 0);
-
-    MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
-                mali_gp_get_rawstat(group->gp_core),
-                mali_group_core_description(group)));
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     mali_gp_get_rawstat(group->gp_core), 0);
+
+       MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                            mali_gp_get_rawstat(group->gp_core),
+                            mali_group_core_description(group)));
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
-    ret = mali_executor_interrupt_gp(group, MALI_TRUE);
+       ret = mali_executor_interrupt_gp(group, MALI_TRUE);
 
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
@@ -1693,71 +1636,53 @@ _mali_osk_errcode_t mali_group_upper_half_gp(void *data)
                return ret;
        }
 #endif
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-            0, 0, /* No pid and tid for interrupt handler */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
-            mali_gp_get_rawstat(group->gp_core), 0);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     0xFFFFFFFF, 0);
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
-    return ret;
+       return ret;
 }
 
 static void mali_group_bottom_half_gp(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-            0, _mali_osk_get_tid(), /* pid and tid */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
-            mali_gp_get_rawstat(group->gp_core), 0);
-
-    mali_executor_interrupt_gp(group, MALI_FALSE);
-
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-            0, _mali_osk_get_tid(), /* pid and tid */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
-            mali_gp_get_rawstat(group->gp_core), 0);
-}
+       struct mali_group *group = (struct mali_group *)data;
 
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-int PP0_int_cnt = 0;
-int mali_PP0_int_cnt(void)
-{
-    return PP0_int_cnt;
-}
-EXPORT_SYMBOL(mali_PP0_int_cnt);
-
-int PP1_int_cnt = 0;
-int mali_PP1_int_cnt(void)
-{
-    return PP1_int_cnt;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     0xFFFFFFFF, 0);
+
+       mali_executor_interrupt_gp(group, MALI_FALSE);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     0xFFFFFFFF, 0);
 }
-EXPORT_SYMBOL(mali_PP1_int_cnt);
-#endif
 
 _mali_osk_errcode_t mali_group_upper_half_pp(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    struct mali_pp_core *core = group->pp_core;
-#endif
-    _mali_osk_errcode_t ret;
+       struct mali_group *group = (struct mali_group *)data;
+       _mali_osk_errcode_t ret;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_lock();
@@ -1768,30 +1693,24 @@ _mali_osk_errcode_t mali_group_upper_half_pp(void *data)
        }
 #endif
 
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-            0, 0, /* No pid and tid for interrupt handler */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
-                mali_pp_core_get_id(group->pp_core)),
-            mali_pp_get_rawstat(group->pp_core), 0);
-
-    MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
-                mali_pp_get_rawstat(group->pp_core),
-                mali_group_core_description(group)));
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     mali_pp_get_rawstat(group->pp_core), 0);
+
+       MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                            mali_pp_get_rawstat(group->pp_core),
+                            mali_group_core_description(group)));
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
 
-    ret = mali_executor_interrupt_pp(group, MALI_TRUE);
+       ret = mali_executor_interrupt_pp(group, MALI_TRUE);
 
-#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-    if (core->core_id == 0)
-        PP0_int_cnt++;
-    else if (core->core_id == 1)
-        PP1_int_cnt++;
-#endif
 #if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_lock();
@@ -1808,142 +1727,143 @@ _mali_osk_errcode_t mali_group_upper_half_pp(void *data)
                return ret;
        }
 #endif
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-            0, 0, /* No pid and tid for interrupt handler */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
-                mali_pp_core_get_id(group->pp_core)),
-            mali_pp_get_rawstat(group->pp_core), 0);
-
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     0xFFFFFFFF, 0);
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        mali_executor_unlock();
 #endif
 #endif
-    return ret;
+       return ret;
 }
 
 static void mali_group_bottom_half_pp(void *data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-    MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-            0, _mali_osk_get_tid(), /* pid and tid */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
-                mali_pp_core_get_id(group->pp_core)),
-            mali_pp_get_rawstat(group->pp_core), 0);
-
-    mali_executor_interrupt_pp(group, MALI_FALSE);
-
-    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-            0, _mali_osk_get_tid(), /* pid and tid */
-            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
-                mali_pp_core_get_id(group->pp_core)),
-            mali_pp_get_rawstat(group->pp_core), 0);
+       struct mali_group *group = (struct mali_group *)data;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     0xFFFFFFFF, 0);
+
+       mali_executor_interrupt_pp(group, MALI_FALSE);
+
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     0xFFFFFFFF, 0);
 }
 
-static void mali_group_timeout(void *data)
+static void mali_group_timeout(void *callback_data)
 {
-    struct mali_group *group = (struct mali_group *)data;
-    MALI_DEBUG_ASSERT_POINTER(group);
-
-    MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
-                mali_group_core_description(group),
-                _mali_osk_time_tickcount()));
-
-    if (mali_core_timeout < 65533)
-        mali_core_timeout++;
-    if (NULL != group->gp_core) {
-        mali_group_schedule_bottom_half_gp(group);
-    } else {
-        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
-        mali_group_schedule_bottom_half_pp(group);
-    }
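+       /*
+        * Since Linux 4.15 the timer core passes the callback a pointer to the
+        * embedded struct timer_list instead of an opaque data value, so the
+        * group is recovered via container_of plus the data field set up by
+        * _mali_osk_timer_setcallback_data().
+        */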
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+       _mali_osk_timer_t *group_timer = _MALI_OSK_CONTAINER_OF(callback_data, _mali_osk_timer_t, timer);
+       struct mali_group *group = (struct mali_group *)(group_timer->data);
+       MALI_DEBUG_ASSERT_POINTER(group);
+#else
+       struct mali_group *group = (struct mali_group *)callback_data;
+       MALI_DEBUG_ASSERT_POINTER(group);
+#endif
+       MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+                            mali_group_core_description(group),
+                            _mali_osk_time_tickcount()));
+
+       if (NULL != group->gp_core) {
+               mali_group_schedule_bottom_half_gp(group);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               mali_group_schedule_bottom_half_pp(group);
+       }
 }
 
 mali_bool mali_group_zap_session(struct mali_group *group,
-        struct mali_session_data *session)
+                                struct mali_session_data *session)
 {
-    MALI_DEBUG_ASSERT_POINTER(group);
-    MALI_DEBUG_ASSERT_POINTER(session);
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    if (group->session != session) {
-        /* not running from this session */
-        return MALI_TRUE; /* success */
-    }
-
-    if (group->is_working) {
-        /* The Zap also does the stall and disable_stall */
-        mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
-        return zap_success;
-    } else {
-        /* Just remove the session instead of zapping */
-        mali_group_clear_session(group);
-        return MALI_TRUE; /* success */
-    }
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (group->session != session) {
+               /* not running from this session */
+               return MALI_TRUE; /* success */
+       }
+
+       if (group->is_working) {
+               /* The Zap also does the stall and disable_stall */
+               mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+               return zap_success;
+       } else {
+               /* Just remove the session instead of zapping */
+               mali_group_clear_session(group);
+               return MALI_TRUE; /* success */
+       }
 }
 
 #if defined(CONFIG_MALI400_PROFILING)
 static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
 {
-    u32 source0 = 0;
-    u32 value0 = 0;
-    u32 source1 = 0;
-    u32 value1 = 0;
-    u32 profiling_channel = 0;
-
-    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-
-    switch (core_num) {
-        case 0:
-            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_EVENT_CHANNEL_GPU |
-                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
-            break;
-        case 1:
-            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_EVENT_CHANNEL_GPU |
-                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
-            break;
-        case 2:
-            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_EVENT_CHANNEL_GPU |
-                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
-            break;
-        default:
-            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                MALI_PROFILING_EVENT_CHANNEL_GPU |
-                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
-            break;
-    }
-
-    if (0 == core_num) {
-        mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
-    }
-    if (1 == core_num) {
-        if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
-            mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
-        } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
-            mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
-        }
-    }
-    if (2 == core_num) {
-        if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
-            mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
-        } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
-            mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
-        }
-    }
-
-    _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+       u32 source0 = 0;
+       u32 value0 = 0;
+       u32 source1 = 0;
+       u32 value1 = 0;
+       u32 profiling_channel = 0;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       switch (core_num) {
+       case 0:
+               profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+               break;
+       case 1:
+               profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+               break;
+       case 2:
+               profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+               break;
+       default:
+               profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+               break;
+       }
+
+       if (0 == core_num) {
+               mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+       }
+       if (1 == core_num) {
+               if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+                       mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+               } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+                       mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+               }
+       }
+       if (2 == core_num) {
+               if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+                       mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+               } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+                       mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+               }
+       }
+
+       _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
 }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
index 5c20fc32993f0e29fc75d03d874ab7ba9a315fc9..6df2917cfc3dc13871adcfa2b22a53316a4062ed 100755 (executable)
@@ -947,7 +947,17 @@ _mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_
  * asked for.
  *
  * @{ */
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and registers the callback function.
+ * This does not start the timer.
+ *
+ * @param callback Function to call when the timer expires
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback);
+#else
 /** @brief Initialize a timer
  *
  * Allocates resources for a new timer, and initializes them. This does not
@@ -956,7 +966,7 @@ _mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_
  * @return a pointer to the allocated timer object, or NULL on failure.
  */
 _mali_osk_timer_t *_mali_osk_timer_init(void);
-
+#endif
 /** @brief Start a timer
  *
  * It is an error to start a timer without setting the callback via
@@ -1033,7 +1043,14 @@ void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
  * @return MALI_TRUE if time is active, MALI_FALSE if it is not active
  */
 mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+/** @brief Set the input parameter for the timer's callback function.
+ *
+ * @param tim the timer.
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback_data(_mali_osk_timer_t *tim, void *data);
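+/*
+ * Usage sketch for the 4.15+ API (my_callback, my_data and ticks_to_expire
+ * are placeholder names):
+ *
+ *     _mali_osk_timer_t *tim = _mali_osk_timer_init(my_callback);
+ *     _mali_osk_timer_setcallback_data(tim, my_data);
+ *     _mali_osk_timer_add(tim, ticks_to_expire);
+ */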
+#else
 /** @brief Set a timer's callback parameters.
  *
  * This must be called at least once before a timer is started/modified.
@@ -1047,7 +1064,7 @@ mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
  * @param data Function-specific data to supply to the function on expiry.
  */
 void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
-
+#endif
 /** @brief Terminate a timer, and deallocate resources.
  *
  * The timer must first be stopped by calling _mali_osk_timer_del().
index 6e9a1336bf0be77fb7a26c9212422a03f78f3c5c..4a0263f5a16c671e50f45b80bd4e15355a1a995c 100755 (executable)
@@ -19,6 +19,8 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
+#include <linux/version.h>
+#include <linux/timer.h>
 
 /**
  * @addtogroup uddapi Unified Device Driver (UDD) APIs
@@ -398,7 +400,12 @@ typedef struct _mali_osk_notification_t_struct {
 typedef void (*_mali_osk_timer_callback_t)(void *arg);
 
 /** @brief Private type for Timer Callback Objects */
-typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
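+/* Since Linux 4.15 struct timer_list no longer carries a caller data field,
+ * so the OSK timer stores the callback argument alongside the kernel timer. */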
+typedef struct _mali_osk_timer_t_struct {
+       struct timer_list timer;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+       unsigned long data;
+#endif
+} _mali_osk_timer_t;
 /** @} */ /* end group _mali_osk_timer */
 
 
index 4ed5f867ea502f964c8820015f9b439973c5db38..c514c49803d58ec3bc0fbad554acd32fc7333208 100755 (executable)
@@ -461,6 +461,61 @@ static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timelin
        return schedule_mask;
 }
 
+static mali_scheduler_mask mali_timeline_release_with_depended_point(struct mali_timeline_tracker *tracker)
+{
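+       /*
+        * A signaled soft tracker may leave waiters behind on the soft
+        * timeline that wait on exactly its point; walk the waiter list from
+        * the tail and release every waiter that matches that point.
+        */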
+       struct mali_timeline *timeline;
+       struct mali_timeline_waiter *waiter;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       timeline = tracker->timeline;
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+       MALI_DEBUG_CODE({
+               struct mali_timeline_system *system = timeline->system;
+               MALI_DEBUG_ASSERT_POINTER(system);
+
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+       });
+
+       /* Only release the waiters that wait for this tracker's point. */
+       waiter = timeline->waiter_tail;
+       while (NULL != waiter) {
+               if (waiter->point == tracker->point) {
+                       struct mali_timeline_waiter *waiter_next;
+                       struct mali_timeline_waiter *waiter_prev;
+
+                       waiter_next = waiter->timeline_next;
+                       waiter_prev = waiter->timeline_prev;
+                       waiter->timeline_next = NULL;
+                       waiter->timeline_prev = NULL;
+
+                       if (NULL != waiter_prev) {
+                               waiter_prev->timeline_next = waiter_next;
+                       }
+
+                       if (NULL != waiter_next) {
+                               waiter_next->timeline_prev = waiter_prev;
+                       }
+
+                       if (waiter == timeline->waiter_tail)
+                               timeline->waiter_tail = waiter_next;
+
+                       if (waiter == timeline->waiter_head)
+                               timeline->waiter_head = waiter_prev;
+
+                       schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+                       waiter = waiter_next;
+               } else {
+                       waiter = waiter->timeline_next;
+               }
+       }
+
+       return schedule_mask;
+}
+
 void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
                                mali_timeline_tracker_type type,
                                struct mali_timeline_fence *fence,
@@ -549,6 +604,11 @@ mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *
                MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
        } else {
                tracker_prev->timeline_next = tracker_next;
+               if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+                       /* Use the signaled soft tracker to release the soft waiters that depend on its point. */
+                       schedule_mask |= mali_timeline_release_with_depended_point(tracker);
+                       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+               }
        }
 
        MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
@@ -1160,6 +1220,12 @@ static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_
                        continue;
                }
 
+               if ((MALI_TIMELINE_SOFT == timeline->id) && mali_timeline_is_tracker_released(timeline, point)) {
+                       /* The tracker that the point relates to has already been released, so no waiter is needed. */
+                       continue;
+               }
+
                /* The point is on timeline. */
                MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
 
index 3aeb75ce1de53ad3029e1e401406003115e2d79b..574f9f1fac1a8d439805097c6608810abebcae4d 100755 (executable)
@@ -305,6 +305,31 @@ MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timelin
        return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
 }
 
+/**
+ * Check if the tracker that the point relates to has been released. The tracker is released once it is no longer on the timeline.
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if the tracker has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_tracker_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       struct mali_timeline_tracker *tracker;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       tracker = timeline->tracker_tail;
+
+       while (NULL != tracker) {
+               if (point == tracker->point)
+                       return MALI_FALSE;
+               tracker = tracker->timeline_next;
+       }
+
+       return MALI_TRUE;
+}
+
 /**
  * Check if a point is valid.  A point is valid if is on the timeline or has been released.
  *
index be8a53e5bdc2a2b6073adedd6d9789720a7fe21e..d6421e15ec595c0b2c1d15895ee0e901e2a4b953 100644 (file)
@@ -91,14 +91,13 @@ static void mali_internal_fence_check_cb_func(struct dma_fence *fence, struct dm
        ret = atomic_dec_and_test(&sync_fence->status);
        if (ret)
                wake_up_all(&sync_fence->wq);
-#else  
-       ret =sync_fence->fence->ops->signaled(sync_fence->fence);
+#else
+       ret = sync_fence->fence->ops->signaled(sync_fence->fence);
 
 #ifdef DEBUG
        if (0 > ret)
-               trace_printk("Mali internal sync:fence signaled? ret=%d, fence  0x%p for sync_fence 0x%p.\n", ret, fence, sync_fence);
+               trace_printk("Mali internal sync:fence signaled? fence  0x%p for sync_fence 0x%p.\n", fence, sync_fence);
 #endif
-
        if (1 == ret)
                wake_up_all(&sync_fence->wq);
 #endif
@@ -123,9 +122,13 @@ static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *
        }
 }
 #endif
-
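+/* wait_queue_t was renamed to wait_queue_entry_t (and task_list to entry) in
+ * Linux 4.13. */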
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+static int mali_internal_sync_fence_wake_up_wq(wait_queue_entry_t *curr, unsigned mode,
+               int wake_flags, void *key)
+#else
 static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
                int wake_flags, void *key)
+#endif
 {
        struct mali_internal_sync_fence_waiter *wait;
        MALI_IGNORE(mode);
@@ -133,8 +136,11 @@ static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode
        MALI_IGNORE(key);
 
        wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+       list_del_init(&wait->work.entry);
+#else
        list_del_init(&wait->work.task_list);
-
+#endif
        wait->callback(wait->work.private, wait);
        return 1;
 }
@@ -199,6 +205,9 @@ void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync
 
        list_for_each_entry_safe(sync_pt, next, &sync_timeline->sync_pt_list_head,
                                 sync_pt_list) {
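+               /* If the point's fence already reports signaled, unlink it from the list first. */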
+               if (sync_pt->base.ops->signaled && sync_pt->base.ops->signaled(&sync_pt->base)) {
+                       list_del_init(&sync_pt->sync_pt_list);
+               }
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
                if (dma_fence_is_signaled_locked(&sync_pt->base))
 #else
@@ -249,12 +258,25 @@ err:
 
 struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd)
 {
-       struct file *file = fget(fd);
-
-       if (NULL == file) {
+       struct file *file;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       struct fence *fence = sync_file_get_fence(fd);
+#else
+       struct dma_fence *fence = sync_file_get_fence(fd);
+#endif
+       /* Verify whether the fd is a valid sync file. */
+       if (unlikely(!fence))
                return NULL;
-       }
 
+       /* sync_file_get_fence() took a reference on the fence, so drop it again. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       fence_put(fence);
+#else
+       dma_fence_put(fence);
+#endif
+
+       file = fget(fd);
+       /* sync_file_get_fence() already verified the file backing the fd, so file cannot be NULL here. */
        return file->private_data;
 }
 
@@ -274,18 +296,17 @@ struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
 
        total_fences = num_fence1 + num_fence2;
 
-       i =0;
+       i = 0;
        j = 0;
-       
+
        if (num_fence1 > 0) {
                fence0 = sync_fence1->cbs[i].fence;
                i = 1;
-       }
-       else if(num_fence2 > 0) {
+       } else if (num_fence2 > 0) {
                fence0 = sync_fence2->cbs[i].fence;
-               j =1;
+               j = 1;
        }
-               
+
        new_sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fence0);
        if (NULL == new_sync_fence) {
                MALI_PRINT_ERROR(("Mali internal sync:Failed to  create the mali internal sync fence when merging sync fence.\n"));
@@ -372,10 +393,10 @@ static void mali_internal_add_fence_array(struct dma_fence **fences, int *num_fe
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
-                              struct fence **fences, int num_fences)
+               struct fence **fences, int num_fences)
 #else
 static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
-                              struct dma_fence **fences, int num_fences)
+               struct dma_fence **fences, int num_fences)
 #endif
 {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
@@ -383,111 +404,114 @@ static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fe
 #else
        struct dma_fence_array *array;
 #endif
-       MALI_DEBUG_ASSERT(1 != num_fences);
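+       /* A single fence needs no fence array wrapper; take ownership of it directly. */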
+       if (num_fences == 1) {
+               sync_fence->fence = fences[0];
+               kfree(fences);
+       } else {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
-       array = fence_array_create(num_fences, fences,
+               array = fence_array_create(num_fences, fences,
                                           fence_context_alloc(1), 1, false);
 #else
-       array = dma_fence_array_create(num_fences, fences,
-                                          dma_fence_context_alloc(1), 1, false);
+               array = dma_fence_array_create(num_fences, fences,
+                                              dma_fence_context_alloc(1), 1, false);
 #endif
-       if (!array)
-               return -ENOMEM;
-
-       sync_fence->fence = &array->base;
+               if (!array)
+                       return -ENOMEM;
 
+               sync_fence->fence = &array->base;
+       }
        return 0;
 }
 
 struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
        struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
 {
-               struct mali_internal_sync_fence *sync_fence;
+       struct mali_internal_sync_fence *sync_fence;
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
-               struct fence **fences, **nfences, **fences1, **fences2;
+       struct fence **fences, **nfences, **fences1, **fences2;
 #else
-               struct dma_fence **fences, **nfences, **fences1, **fences2;
+       struct dma_fence **fences, **nfences, **fences1, **fences2;
 #endif
-               int real_num_fences, i, j, num_fences, num_fences1, num_fences2;
+       int real_num_fences, i, j, num_fences, num_fences1, num_fences2;
 
-               fences1 = mali_internal_get_fences(sync_fence1, &num_fences1);
-               fences2 = mali_internal_get_fences(sync_fence2, &num_fences2);
-               
-               num_fences = num_fences1 + num_fences2;
-       
-               fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
-               if (!fences) {
-                       MALI_PRINT_ERROR(("Mali internal sync:Failed to  alloc buffer for fences.\n"));
-                       goto fences_alloc_failed;
-               }
+       fences1 = mali_internal_get_fences(sync_fence1, &num_fences1);
+       fences2 = mali_internal_get_fences(sync_fence2, &num_fences2);
 
-               for (real_num_fences = i = j = 0; i < num_fences1 && j < num_fences2; ) {
+       num_fences = num_fences1 + num_fences2;
+
+       fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+       if (!fences) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to allocate buffer for fences.\n"));
+               goto fences_alloc_failed;
+       }
+
+       for (real_num_fences = i = j = 0; i < num_fences1 && j < num_fences2;) {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
-                       struct fence *fence1 = fences1[i];
-                       struct fence *fence2 = fences2[j];
+               struct fence *fence1 = fences1[i];
+               struct fence *fence2 = fences2[j];
 #else
-                       struct dma_fence *fence1 = fences1[i];
-                       struct dma_fence *fence2 = fences2[j];
+               struct dma_fence *fence1 = fences1[i];
+               struct dma_fence *fence2 = fences2[j];
 #endif
-                       if (fence1->context < fence2->context) {
+               if (fence1->context < fence2->context) {
+                       mali_internal_add_fence_array(fences, &real_num_fences, fence1);
+
+                       i++;
+               } else if (fence1->context > fence2->context) {
+                       mali_internal_add_fence_array(fences, &real_num_fences, fence2);
+
+                       j++;
+               } else {
+                       if (fence1->seqno - fence2->seqno <= INT_MAX)
                                mali_internal_add_fence_array(fences, &real_num_fences, fence1);
-       
-                               i++;
-                       } else if (fence1->context > fence2->context) {
+                       else
                                mali_internal_add_fence_array(fences, &real_num_fences, fence2);
-       
-                               j++;
-                       } else {
-                               if (fence1->seqno - fence2->seqno <= INT_MAX)
-                                       mali_internal_add_fence_array(fences, &real_num_fences, fence1);
-                               else
-                                       mali_internal_add_fence_array(fences, &real_num_fences, fence2);
-       
-                               i++;
-                               j++;
-                       }
+
+                       i++;
+                       j++;
                }
-       
-               for (; i < num_fences1; i++)
-                       mali_internal_add_fence_array(fences, &real_num_fences, fences1[i]);
-       
-               for (; j < num_fences2; j++)
-                       mali_internal_add_fence_array(fences, &real_num_fences, fences2[j]);
-
-               if (0 == real_num_fences)
+       }
+
+       for (; i < num_fences1; i++)
+               mali_internal_add_fence_array(fences, &real_num_fences, fences1[i]);
+
+       for (; j < num_fences2; j++)
+               mali_internal_add_fence_array(fences, &real_num_fences, fences2[j]);
+
+       if (0 == real_num_fences)
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
-                       fences[real_num_fences++] = fence_get(fences1[0]);
-#else
-                       fences[real_num_fences++] = dma_fence_get(fences1[0]);
-#endif
-       
-               if (num_fences > real_num_fences) {
-                       nfences = krealloc(fences, real_num_fences * sizeof(*fences),
-                                         GFP_KERNEL);
-                       if (!nfences)
-                               goto nfences_alloc_failed;
-       
-                       fences = nfences;
-               }
+               fences[real_num_fences++] = fence_get(fences1[0]);
+#else
+               fences[real_num_fences++] = dma_fence_get(fences1[0]);
+#endif
 
-               sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fences[0]);
-               if (NULL == sync_fence) {
-                       MALI_PRINT_ERROR(("Mali internal sync:Failed to  create the mali internal sync fence when merging sync fence.\n"));
-                       goto sync_fence_alloc_failed;
-               }
+       if (num_fences > real_num_fences) {
+               nfences = krealloc(fences, real_num_fences * sizeof(*fences),
+                                  GFP_KERNEL);
+               if (!nfences)
+                       goto nfences_alloc_failed;
+
+               fences = nfences;
+       }
+
+       sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fences[0]);
+       if (NULL == sync_fence) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to create the mali internal sync fence when merging sync fences.\n"));
+               goto sync_fence_alloc_failed;
+       }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
-               fence_put(fences[0]);
+       fence_put(fences[0]);
 #else
-               dma_fence_put(fences[0]);
+       dma_fence_put(fences[0]);
 #endif
 
-               if (mali_internal_sync_fence_set_fence_array(sync_fence, fences, real_num_fences) < 0) {
-                       MALI_PRINT_ERROR(("Mali internal sync:Failed to  set fence for sync fence.\n"));
-                       goto sync_fence_set_failed;
-               }
-       
-               return sync_fence;
+       if (mali_internal_sync_fence_set_fence_array(sync_fence, fences, real_num_fences) < 0) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to set fence for sync fence.\n"));
+               goto sync_fence_set_failed;
+       }
+
+       return sync_fence;
 
 sync_fence_set_failed:
        fput(sync_fence->file);
@@ -511,7 +535,11 @@ void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter
        MALI_DEBUG_ASSERT_POINTER(waiter);
        MALI_DEBUG_ASSERT_POINTER(callback);
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+       INIT_LIST_HEAD(&waiter->work.entry);
+#else
        INIT_LIST_HEAD(&waiter->work.task_list);
+#endif
        waiter->callback = callback;
 }
 
@@ -552,12 +580,8 @@ int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fe
        else
                err = -1;
 
-       if (0 > err) {
-#ifdef DEBUG
-               trace_printk("Mali, line%d, signal error\n", __LINE__);
-#endif
+       if (0 > err)
                return err;
-       }
 
        if (1 == err)
                return err;
@@ -569,28 +593,24 @@ int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fe
 #endif
 
        if (0 != err) {
-#ifdef DEBUG
-               trace_printk("Mali, fence_add_callback error %d\n", err);
-#endif
-               if (-ENOENT == err)
+               if (-ENOENT == err)
                        err = 1;
                return err;
        }
-
        init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
        waiter->work.private = sync_fence;
 
        spin_lock_irqsave(&sync_fence->wq.lock, flags);
        err =  sync_fence->fence->ops->signaled(sync_fence->fence);
 
-       if (0 == err)
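+       /* Only queue the waiter while the fence is still unsignaled
+        * (i.e. while signaled() returns 0).
+        */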
+       if (0 == err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+               __add_wait_queue_entry_tail(&sync_fence->wq, &waiter->work);
+#else
                __add_wait_queue_tail(&sync_fence->wq, &waiter->work);
-
-       spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
-#ifdef DEBUG
-       if ((1 != err) && (0 != err))
-               trace_printk("Mali, line%d, signal error\n", __LINE__);
 #endif
+       }
+       spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
 
        return err;
 #endif
@@ -606,17 +626,22 @@ int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_
        MALI_DEBUG_ASSERT_POINTER(waiter);
 
        spin_lock_irqsave(&sync_fence->wq.lock, flags);
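+       /* Unlink the waiter if it is still queued; if the entry is already
+        * unlinked there is nothing to cancel and -ENOENT is returned.
+        */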
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+       if (!list_empty(&waiter->work.entry))
+               list_del_init(&waiter->work.entry);
+#else
        if (!list_empty(&waiter->work.task_list))
                list_del_init(&waiter->work.task_list);
+#endif
        else
                ret = -ENOENT;
        spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
 
        if (0 == ret) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
-        dma_fence_remove_callback(sync_fence->fence, &waiter->cb);
+               dma_fence_remove_callback(sync_fence->fence, &waiter->cb);
 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-        fence_remove_callback(sync_fence->fence, &waiter->cb);
+               fence_remove_callback(sync_fence->fence, &waiter->cb);
 #endif
 
        }
@@ -673,18 +698,12 @@ static void mali_internal_fence_release(struct fence *fence)
        sync_pt = mali_internal_fence_to_sync_pt(fence);
        parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
 
-
-       spin_lock_irqsave(fence->lock, flags);
-#if 0
-       if (WARN_ON_ONCE(!list_empty(&sync_pt->sync_pt_list)))
-               list_del(&sync_pt->sync_pt_list);
-#else
-       //sync_pt_list empty is possible, dont show warn.
-       if (!list_empty(&sync_pt->sync_pt_list))
-               list_del(&sync_pt->sync_pt_list);
-#endif
-       spin_unlock_irqrestore(fence->lock, flags);
-
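+       /* An empty sync_pt_list is normal here, so do a cheap unlocked
+        * check first and re-check under the lock before unlinking.
+        */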
+       if (!list_empty(&sync_pt->sync_pt_list)) {
+               spin_lock_irqsave(fence->lock, flags);
+               if (!list_empty(&sync_pt->sync_pt_list))
+                       list_del_init(&sync_pt->sync_pt_list);
+               spin_unlock_irqrestore(fence->lock, flags);
+       }
        if (parent->ops->free_pt)
                parent->ops->free_pt(sync_pt);
 
@@ -713,7 +732,8 @@ static bool mali_internal_fence_signaled(struct fence *fence)
 
        ret = parent->ops->has_signaled(sync_pt);
        if (0 > ret)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 68)
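+       /* fence->status was replaced by fence->error in 4.11; the version
+        * check below also assumes the rename was backported to 4.9.68+
+        * stable kernels, but not to the 4.10.x series.
+        */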
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) \
+                || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 68)))
                fence->error = ret;
 #else
                fence->status = ret;
index 88307bccb85edf526485b4b7805544b8f3b22219..7615c22d12cf0bc480f1f369a22f4437a6bc8c3e 100755 (executable)
@@ -90,7 +90,11 @@ typedef void (*mali_internal_sync_callback_t)(struct mali_internal_sync_fence *s
                struct mali_internal_sync_fence_waiter *waiter);
 
 struct mali_internal_sync_fence_waiter {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+       wait_queue_entry_t work;
+#else
        wait_queue_t work;
+#endif
        mali_internal_sync_callback_t callback;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
index 73ca64b9703598173be3e1fdee866746fed0a48b..5726890bdc31525f191a3d15d38ca5e5f085f232 100644 (file)
@@ -203,7 +203,13 @@ int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
        /* Allocate new pages, if needed. */
        for (i = 0; i < remaining; i++) {
                dma_addr_t dma_addr;
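+               /* __GFP_REPEAT was replaced by __GFP_RETRY_MAYFAIL, and
+                * __GFP_COLD was later removed in 4.15, so pick the
+                * allocation flags this kernel understands.
+                */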
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+               gfp_t flags = __GFP_ZERO | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+               gfp_t flags = __GFP_ZERO | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | __GFP_COLD;
+#else
                gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
+#endif
                int err;
 
 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
index 7856ae6c74cefc85d54e3639ccdc8e103f8ee322..879ff066b4204739abc9ba730b09fd9b91902628 100644 (file)
 #include "mali_memory_secure.h"
 #include "mali_osk.h"
 #include <linux/mutex.h>
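+/* The DMA direct-mapping helpers moved to <linux/dma-direct.h> in 4.16. */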
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+#include <linux/dma-direct.h>
+#else
 #include <linux/dma-mapping.h>
+#endif
 #include <linux/dma-buf.h>
 
 _mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd)
index 132bad4cd6ed5bfadc08fb635c6ebe570702f3ac..c24d6efcb4b3fa14aeb205cfc8bb9bb7c77235ee 100644 (file)
@@ -256,7 +256,11 @@ static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_t
        }
 
        /* Get system free pages number. */
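+       /* 4.14 renamed global_page_state() to global_zone_page_state() for
+        * zone counters such as NR_FREE_PAGES.
+        */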
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+       system_free_size = global_zone_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+#else
        system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+#endif
        last_gpu_utilization = _mali_ukk_utilization_gp_pp();
 
        if ((last_gpu_utilization < gpu_utilization_threshold_value)
@@ -583,9 +587,12 @@ int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx)
 
                list_add_tail(&m_page->list, &swap_mem->pages);
        }
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+       system_free_size = global_zone_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+#else
        system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
-
+#endif
+
        if ((system_free_size < mali_mem_swap_out_threshold_value)
            && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2))
            && mali_utilization_enabled()) {
index 58678417b34f48247c614bfcb2e86fea63247caa..4173d07b4e58f5a80868aaf5380bb949967c7796 100755 (executable)
@@ -56,8 +56,13 @@ _mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
        /* OPT Recycling of notification objects */
        _mali_osk_notification_wrapper_t *notification;
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+       notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+                       GFP_KERNEL | __GFP_HIGH | __GFP_RETRY_MAYFAIL);
+#else
        notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
                        GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+#endif
        if (NULL == notification) {
                MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
                return NULL;
index 8ada2da8d386b0659f7f13727b3cec1967b015bf..bb8d6fc5fa9bdc8233dd9c1615a35f0f3df84649 100755 (executable)
 #include "mali_osk.h"
 #include "mali_kernel_common.h"
 
-struct _mali_osk_timer_t_struct {
-       struct timer_list timer;
-};
-
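+/* From 4.15 this wrapper uses timer_setup(): callbacks take a
+ * struct timer_list * and the old timer->data field is no longer used;
+ * per-timer data lives in _mali_osk_timer_t::data instead.
+ */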
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+typedef void (*timer_timeout_function_t)(struct timer_list *);
+#else
 typedef void (*timer_timeout_function_t)(unsigned long);
+#endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+_mali_osk_timer_t *_mali_osk_timer_init(_mali_osk_timer_callback_t callback)
+#else
 _mali_osk_timer_t *_mali_osk_timer_init(void)
+#endif
 {
        _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+       if (NULL != t) {
+               timer_setup(&t->timer, (timer_timeout_function_t)callback, 0);
+               t->data = 0;
+       }
+#else
        if (NULL != t) init_timer(&t->timer);
+#endif
        return t;
 }
 
@@ -61,12 +70,19 @@ mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
        MALI_DEBUG_ASSERT_POINTER(tim);
        return 1 == timer_pending(&(tim->timer));
 }
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+void _mali_osk_timer_setcallback_data(_mali_osk_timer_t *tim,  void *data)
+#else
 void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
+#endif
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+       tim->data = (unsigned long)data;
+#else
        tim->timer.data = (unsigned long)data;
        tim->timer.function = (timer_timeout_function_t)callback;
+#endif
 }
 
 void _mali_osk_timer_term(_mali_osk_timer_t *tim)
index 1712cfdd40fdc071f543e49bab3d641b5b115d4d..1f9f9412c18de609241b0e5e81aadaf029b46154 100755 (executable)
@@ -230,9 +230,10 @@ static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl
                                mali_spinlock_reentrant_signal(system->spinlock, tid);
                        }
                        mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
-
+#ifndef CONFIG_FTRACE
                        /* dump job queue status and group running status */
                        mali_executor_status_dump();
+#endif
                }
 #endif
        }
@@ -286,9 +287,10 @@ static void timeline_value_str(struct sync_timeline *timeline, char *str, int si
                                mali_spinlock_reentrant_signal(system->spinlock, tid);
                        }
                        mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
-
+#ifndef CONFIG_FTRACE
                        /* dump job queue status and group running status */
                        mali_executor_status_dump();
+#endif
                }
 #endif
        }
@@ -462,6 +464,12 @@ struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
                return NULL;
        }
 
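+       /* The sync fence now holds its own reference to the point's fence;
+        * drop the creation reference so it is not leaked.
+        */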
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       fence_put(&sync_pt->base);
+#else
+       dma_fence_put(&sync_pt->base);
+#endif
+
        return sync_fence;
 }
 #else
diff --git a/utgard/r10p0 b/utgard/r10p0
new file mode 120000 (symlink)
index 0000000..bc2c859
--- /dev/null
@@ -0,0 +1 @@
+../mali
\ No newline at end of file
diff --git a/utgard/r8p0 b/utgard/r8p0
deleted file mode 120000 (symlink)
index bc2c859..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../mali
\ No newline at end of file
diff --git a/utgard/r8p0/.version b/utgard/r8p0/.version
new file mode 100755 (executable)
index 0000000..f84a6cb
--- /dev/null
@@ -0,0 +1 @@
+r8p0-01rel0
diff --git a/utgard/r8p0/Kbuild b/utgard/r8p0/Kbuild
new file mode 100755 (executable)
index 0000000..a2f8357
--- /dev/null
@@ -0,0 +1,262 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+# 
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# 
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+include $(src)/platform/Kbuild.amlogic
+
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 0
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+MALI_ENABLE_CPU_CYCLES ?= 0
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+ifeq ($(wildcard $(src)/linux/license/gpl/*),)
+    ccflags-y += -I$(src)/linux/license/proprietary
+    ifeq ($(CONFIG_MALI400_PROFILING),y)
+        $(error Profiling is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_PM_RUNTIME),y)
+        $(error Runtime PM is incompatible with non-GPL license)
+    endif
+    ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
+        $(error DMA-BUF is incompatible with non-GPL license)
+    endif
+    $(error Linux Device integration is incompatible with non-GPL license)
+else
+    ccflags-y += -I$(src)/linux/license/gpl
+endif
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+    ifeq ($(USING_DVFS), 1)
+        $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose the one you need)
+    endif
+endif
+
+ifeq ($(MALI_PLATFORM_FILES),)
+ifeq ($(CONFIG_ARCH_EXYNOS4),y)
+EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM=exynos4
+export MALI_PLATFORM_FILES_BUILDIN = $(notdir $(wildcard $(src)/platform/$(MALI_PLATFORM)/*.c))
+export MALI_PLATFORM_FILES_ADD_PREFIX = $(addprefix platform/$(MALI_PLATFORM)/,$(MALI_PLATFORM_FILES_BUILDIN))
+endif
+endif
+
+mali-y += \
+       linux/mali_osk_atomics.o \
+       linux/mali_osk_irq.o \
+       linux/mali_osk_wq.o \
+       linux/mali_osk_locks.o \
+       linux/mali_osk_wait_queue.o \
+       linux/mali_osk_low_level_mem.o \
+       linux/mali_osk_math.o \
+       linux/mali_osk_memory.o \
+       linux/mali_osk_misc.o \
+       linux/mali_osk_mali.o \
+       linux/mali_osk_notification.o \
+       linux/mali_osk_time.o \
+       linux/mali_osk_timers.o \
+       linux/mali_osk_bitmap.o
+
+mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
+mali-y += linux/mali_memory_external.o
+mali-y += linux/mali_memory_block_alloc.o
+mali-y += linux/mali_memory_swap_alloc.o
+
+mali-y += \
+       linux/mali_memory_manager.o \
+       linux/mali_memory_virtual.o \
+       linux/mali_memory_util.o \
+       linux/mali_memory_cow.o \
+       linux/mali_memory_defer_bind.o
+
+mali-y += \
+       linux/mali_ukk_mem.o \
+       linux/mali_ukk_gp.o \
+       linux/mali_ukk_pp.o \
+       linux/mali_ukk_core.o \
+       linux/mali_ukk_soft_job.o \
+       linux/mali_ukk_timeline.o
+
+mali-$(CONFIG_MALI_DEVFREQ) += \
+       linux/mali_devfreq.o \
+       common/mali_pm_metrics.o
+
+# Source files which always are included in a build
+mali-y += \
+       common/mali_kernel_core.o \
+       linux/mali_kernel_linux.o \
+       common/mali_session.o \
+       linux/mali_device_pause_resume.o \
+       common/mali_kernel_vsync.o \
+       linux/mali_ukk_vsync.o \
+       linux/mali_kernel_sysfs.o \
+       common/mali_mmu.o \
+       common/mali_mmu_page_directory.o \
+       common/mali_mem_validation.o \
+       common/mali_hw_core.o \
+       common/mali_gp.o \
+       common/mali_pp.o \
+       common/mali_pp_job.o \
+       common/mali_gp_job.o \
+       common/mali_soft_job.o \
+       common/mali_scheduler.o \
+       common/mali_executor.o \
+       common/mali_group.o \
+       common/mali_dlbu.o \
+       common/mali_broadcast.o \
+       common/mali_pm.o \
+       common/mali_pmu.o \
+       common/mali_user_settings_db.o \
+       common/mali_kernel_utilization.o \
+       common/mali_control_timer.o \
+       common/mali_l2_cache.o \
+       common/mali_timeline.o \
+       common/mali_timeline_fence_wait.o \
+       common/mali_timeline_sync_fence.o \
+       common/mali_spinlock_reentrant.o \
+       common/mali_pm_domain.o \
+       linux/mali_osk_pm.o \
+       linux/mali_pmu_power_up_down.o \
+       __malidrv_build_info.o
+
+ifneq ($(wildcard $(src)/linux/mali_slp_global_lock.c),)
+       mali-y += linux/mali_slp_global_lock.o
+endif
+
+ifneq ($(MALI_PLATFORM_FILES),)
+       mali-y += $(MALI_PLATFORM_FILES:.c=.o)
+endif
+
+ifneq ($(MALI_PLATFORM_FILES_ADD_PREFIX),)
+       mali-y += $(MALI_PLATFORM_FILES_ADD_PREFIX:.c=.o)
+endif
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_secure.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o
+mali-$(CONFIG_SYNC) += linux/mali_internal_sync.o
+mali-$(CONFIG_SYNC_FILE) += linux/mali_sync.o
+mali-$(CONFIG_SYNC_FILE) += linux/mali_internal_sync.o
+mali-$(CONFIG_MALI_DMA_BUF_FENCE) += linux/mali_dma_fence.o
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+ccflags-$(CONFIG_SYNC_FILE) += -Idrivers/staging/android
+
+mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
+
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ifdef CONFIG_MALI400_DEBUG
+ccflags-y += -DMALI_STATE_TRACKING=1
+else
+ccflags-y += -DMALI_STATE_TRACKING=0
+endif
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+       ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+# Built-in include path is different
+ifeq ($(MALI_PLATFORM_FILES),)
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../ump/include/
+else
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump
+endif
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
+VERSION_STRINGS += USING_DMA_BUF_FENCE=$(CONFIG_MALI_DMA_BUF_FENCE)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(src)/__malidrv_build_info.c:
+       @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c
diff --git a/utgard/r8p0/Kconfig b/utgard/r8p0/Kconfig
new file mode 100755 (executable)
index 0000000..fbf08de
--- /dev/null
@@ -0,0 +1,136 @@
+menu "Mali GPU OpenGL device driver"
+config MALI400
+       tristate "Mali-300/400/450 support"
+       depends on ARM || ARM64
+       default m
+       select DMA_SHARED_BUFFER
+       ---help---
+         This enables support for the ARM Mali-300, Mali-400, and Mali-450
+         GPUs.
+
+         To compile this driver as a module, choose M here: the module will be
+         called mali.
+
+config MALI470
+       bool "Enable Mali-470 support"
+       depends on MALI400
+       ---help---
+         This enables support for Mali-470 specific features.
+
+config MALI400_DEBUG
+       bool "Enable debug in Mali driver"
+       depends on MALI400
+       default n
+       ---help---
+         This enables extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING_EXTRA_SUPPORT
+       bool "Select other kernel options needed to support profiling"
+       depends on MALI400_PROFILING
+       select PROFILING
+       select FTRACE
+       select PERF_EVENTS
+       select ENABLE_DEFAULT_TRACERS
+       select DEBUG_MUTEXES
+       select HIGH_RES_TIMERS
+       select HW_PERF_EVENTS
+       select CPU_FREQ
+       select MALI400_DEBUG
+
+config MALI400_PROFILING
+       bool "Enable Mali profiling"
+       depends on MALI400
+       select TRACEPOINTS
+       default n
+       ---help---
+         This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+       bool "Enable internal Mali profiling API"
+       depends on MALI400_PROFILING
+       default n
+       ---help---
+         This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+       bool "Enable UMP support"
+       depends on MALI400
+       ---help---
+         This enables support for the UMP memory sharing API in the Mali driver.
+
+config MALI_DVFS
+       bool "Enable Mali dynamic frequency change"
+       depends on MALI400 && !MALI_DEVFREQ
+       default n
+       ---help---
+         This enables support for dynamically changing the Mali frequency,
+         with the goal of lowering power consumption.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+       bool "Map dma-buf attachments on attach"
+       depends on MALI400 && DMA_SHARED_BUFFER
+       default y
+       ---help---
+         This makes the Mali driver map dma-buf attachments when they are
+         attached. If this is not set, dma-buf attachments will be mapped
+         every time the GPU needs to access the buffer.
+
+         Mapping for each access can cause lower performance.
+
+config MALI_SHARED_INTERRUPTS
+       bool "Support for shared interrupts"
+       depends on MALI400
+       default n
+       ---help---
+         Adds functionality required to properly support shared interrupts.  Without this support,
+         the device driver will fail during insmod if it detects shared interrupts.  This also
+         works when the GPU is not using shared interrupts, but might have a slight performance
+         impact.
+
+if ARCH_MESON6
+config MESON6_GPU_EXTRA
+       bool "M6 fix"
+       depends on MALI400
+       default y
+       select MALI_SHARED_INTERRUPTS
+endif
+
+config MALI_PMU_PARALLEL_POWER_UP
+       bool "Power up Mali PMU domains in parallel"
+       depends on MALI400
+       default n
+       ---help---
+         This makes the Mali driver power up all PMU power domains in parallel, instead of
+         powering up domains one by one, with a slight delay in between. Powering on all power
+         domains at the same time may cause peak currents higher than what some systems can handle.
+         These systems must not enable this option.
+
+config MALI_DT
+       bool "Using device tree to initialize module"
+       depends on MALI400 && OF
+       default n
+       ---help---
+         This enables the Mali driver to use the device tree to get platform
+         resources, and disables the old config method. The Mali driver can
+         run on platforms where the device tree is enabled in the kernel and
+         the corresponding hardware description is implemented properly in
+         the device DTS file.
+
+config MALI_DEVFREQ
+       bool "Use devfreq to tune the GPU frequency"
+       depends on MALI400 && PM_DEVFREQ
+       default n
+       ---help---
+       Support devfreq for Mali.
+
+       Using the devfreq framework and, by default, the simpleondemand
+       governor, the frequency of Mali will be dynamically selected from the
+       available OPPs.
+
+config MALI_QUIET
+       bool "Make Mali driver very quiet"
+       depends on MALI400 && !MALI400_DEBUG
+       default n
+       ---help---
+         This forces the Mali driver to never print any messages.
+
+         If unsure, say N.
+endmenu
diff --git a/utgard/r8p0/Makefile b/utgard/r8p0/Makefile
new file mode 100755 (executable)
index 0000000..5a259fe
--- /dev/null
@@ -0,0 +1,216 @@
+#
+# Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+# 
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+# 
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_DVFS ?= 1
+USING_DMA_BUF_FENCE ?= 0
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_DMA_BUF_LAZY_MAP ?= 0
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
+MALI_MEM_SWAP_TRACKING ?= 0
+USING_DEVFREQ ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
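+# check_cc2: expands to $(2) if compiler $(1) can build a trivial input,
+# otherwise to $(3).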
+check_cc2 = \
+       $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+       then \
+               echo "$(2)"; \
+       else \
+               echo "$(3)"; \
+       fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Warn if old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+    ifeq ($(USING_DVFS), 1)
+        $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose the one you need)
+    endif
+endif
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+export CONFIG_MALI450=y
+export CONFIG_MALI470=y
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+export EXTRA_DEFINES += -DCONFIG_MALI450=1
+export EXTRA_DEFINES += -DCONFIG_MALI470=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_DMA_BUF_LAZY_MAP),1)
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),0)
+export CONFIG_MALI_DMA_BUF_LAZY_MAP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_LAZY_MAP
+else
+$(warning "MALI_DMA_BUF_LAZY_MAP requires MALI_DMA_BUF_MAP_ON_ATTACH to be disabled; MALI_DMA_BUF_LAZY_MAP will be ignored.")
+endif
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
+endif
+
+ifeq ($(USING_DMA_BUF_FENCE),1)
+export CONFIG_MALI_DMA_BUF_FENCE=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_FENCE
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
+ifeq ($(USING_DEVFREQ), 1)
+ifdef CONFIG_PM_DEVFREQ
+export CONFIG_MALI_DEVFREQ=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DEVFREQ=1
+else
+$(warning "You want to support DEVFREQ but the kernel does not have CONFIG_PM_DEVFREQ enabled.")
+endif
+endif
+
+ifneq ($(BUILD),release)
+# Debug
+export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
+endif
+
+ifeq ($(MALI_MEM_SWAP_TRACKING),1)
+EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1
+endif
+
+all: $(UMP_SYMVERS_FILE)
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+       @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+       $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/utgard/r8p0/clean.sh b/utgard/r8p0/clean.sh
new file mode 100755 (executable)
index 0000000..130fd73
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+# rm *.o, *.cmd, *~
+find . -name "*.o" -o -name "*.cmd" -o -name "*~" | xargs rm -rf
+rm -f  Module.symvers
+rm -f  mali.ko
+rm -f  mali.mod.c
+rm -f  modules.order
diff --git a/utgard/r8p0/common/mali_broadcast.c b/utgard/r8p0/common/mali_broadcast.c
new file mode 100755 (executable)
index 0000000..4c4b2bc
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#define MALI_BROADCAST_REGISTER_SIZE      0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK    0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK    0x4
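+
+/* Mask layout (see mali_bcast_add_group): bits 7:0 select PP cores and
+ * bits 23:16 the matching MMUs; the IRQ override mask uses only the low
+ * PP byte (current_mask & 0xFF).
+ */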
+
+struct mali_bcast_unit {
+       struct mali_hw_core hw_core;
+       u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+       struct mali_bcast_unit *bcast_unit = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+       MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+                            resource->description));
+
+       bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+       if (NULL == bcast_unit) {
+               MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
+               return NULL;
+       }
+
+       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+                       resource, MALI_BROADCAST_REGISTER_SIZE)) {
+               bcast_unit->current_mask = 0;
+               mali_bcast_reset(bcast_unit);
+
+               return bcast_unit;
+       } else {
+               MALI_PRINT_ERROR(("Broadcast: Failed to map broadcast unit\n"));
+       }
+
+       _mali_osk_free(bcast_unit);
+
+       return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       mali_hw_core_delete(&bcast_unit->hw_core);
+       _mali_osk_free(bcast_unit);
+}
+
+/* Add the @group's id to the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+                         struct mali_group *group)
+{
+       u32 bcast_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+       broadcast_mask = bcast_unit->current_mask;
+
+       broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+       broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+}
+
+/* Remove the @group's id from the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+                            struct mali_group *group)
+{
+       u32 bcast_id;
+       u32 broadcast_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+       broadcast_mask = bcast_unit->current_mask;
+
+       broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+       /* store mask so we can restore on reset */
+       bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       MALI_DEBUG_PRINT(4,
+                        ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+                         bcast_unit->current_mask,
+                         bcast_unit->current_mask & 0xFF));
+
+       /* set broadcast mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   MALI_BROADCAST_REG_BROADCAST_MASK,
+                                   bcast_unit->current_mask);
+
+       /* set IRQ override mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   MALI_BROADCAST_REG_INTERRUPT_MASK,
+                                   bcast_unit->current_mask & 0xFF);
+}
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+       MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+       MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
+       /* set broadcast mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   MALI_BROADCAST_REG_BROADCAST_MASK,
+                                   0x0);
+
+       /* set IRQ override mask */
+       mali_hw_core_register_write(&bcast_unit->hw_core,
+                                   MALI_BROADCAST_REG_INTERRUPT_MASK,
+                                   0x0);
+}
diff --git a/utgard/r8p0/common/mali_broadcast.h b/utgard/r8p0/common/mali_broadcast.h
new file mode 100755 (executable)
index 0000000..e12e8a2
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
+/*
+ *  Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 × (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ *   setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-set cached mask. This needs to be called after having been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+       mali_bcast_reset(bcast_unit);
+}
+
+#endif /* __MALI_BROADCAST_H__ */
diff --git a/utgard/r8p0/common/mali_control_timer.c b/utgard/r8p0/common/mali_control_timer.c
new file mode 100755 (executable)
index 0000000..fc6ceb4
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+static u64 period_start_time = 0;
+
+static _mali_osk_timer_t *mali_control_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 mali_control_timeout = 1000;
+
+void mali_control_timer_add(u32 timeout)
+{
+       _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+static void mali_control_timer_callback(void *arg)
+{
+       if (mali_utilization_enabled()) {
+               struct mali_gpu_utilization_data *util_data = NULL;
+               u64 time_period = 0;
+               mali_bool need_add_timer = MALI_TRUE;
+
+               /* Calculate gpu utilization */
+               util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer);
+
+               if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+                       mali_dvfs_policy_realize(util_data, time_period);
+#else
+                       mali_utilization_platform_realize(util_data);
+#endif
+
+                       if (MALI_TRUE == need_add_timer) {
+                               mali_control_timer_add(mali_control_timeout);
+                       }
+               }
+       }
+}
+
+/* Init a timer (for now it is used for GPU utilization and dvfs) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+       _mali_osk_device_data data;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Use device specific settings (if defined) */
+               if (0 != data.control_interval) {
+                       mali_control_timeout = data.control_interval;
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+               }
+       }
+
+       mali_control_timer = _mali_osk_timer_init();
+       if (NULL == mali_control_timer) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_control_timer_term(void)
+{
+       if (NULL != mali_control_timer) {
+               _mali_osk_timer_del(mali_control_timer);
+               timer_running = MALI_FALSE;
+               _mali_osk_timer_term(mali_control_timer);
+               mali_control_timer = NULL;
+       }
+}
+
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+       mali_utilization_data_assert_locked();
+
+       if (timer_running != MALI_TRUE) {
+               timer_running = MALI_TRUE;
+
+               period_start_time = time_now;
+
+               mali_utilization_reset();
+
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
+void mali_control_timer_pause(void)
+{
+       mali_utilization_data_assert_locked();
+       if (timer_running == MALI_TRUE) {
+               timer_running = MALI_FALSE;
+       }
+}
+
+void mali_control_timer_suspend(mali_bool suspend)
+{
+       mali_utilization_data_lock();
+
+       if (timer_running == MALI_TRUE) {
+               timer_running = MALI_FALSE;
+
+               mali_utilization_data_unlock();
+
+               if (suspend == MALI_TRUE) {
+                       _mali_osk_timer_del(mali_control_timer);
+                       mali_utilization_reset();
+               }
+       } else {
+               mali_utilization_data_unlock();
+       }
+}
diff --git a/utgard/r8p0/common/mali_control_timer.h b/utgard/r8p0/common/mali_control_timer.h
new file mode 100755 (executable)
index 0000000..1265390
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+void mali_control_timer_term(void);
+
+mali_bool mali_control_timer_resume(u64 time_now);
+
+void mali_control_timer_suspend(mali_bool suspend);
+void mali_control_timer_pause(void);
+
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
diff --git a/utgard/r8p0/common/mali_dlbu.c b/utgard/r8p0/common/mali_dlbu.c
new file mode 100755 (executable)
index 0000000..4f2a121
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+mali_dma_addr mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+       MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+                                                             31:12 Physical address to the page used for the DLBU
+                                                             0 DLBU enable - setting this bit to 1 enables the AXI bus
+                                                             between PPs and L2s; setting it to 0 disables the router and
+                                                             no further transactions are sent to the DLBU */
+       MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR     = 0x0004, /**< Master tile list virtual base address;
+                                                             31:12 Virtual address to the page used for the DLBU */
+       MALI_DLBU_REGISTER_TLLIST_VBASEADDR     = 0x0008, /**< Tile list virtual base address;
+                                                             31:12 Virtual address to the tile list. This address is used when
+                                                             calculating the call address sent to PP.*/
+       MALI_DLBU_REGISTER_FB_DIM                 = 0x000C, /**< Framebuffer dimension;
+                                                             23:16 Number of tiles in Y direction-1
+                                                             7:0 Number of tiles in X direction-1 */
+       MALI_DLBU_REGISTER_TLLIST_CONF       = 0x0010, /**< Tile list configuration;
+                                                             29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+                                                             21:16 2^n number of tiles to be binned to one tile list in Y direction
+                                                             5:0 2^n number of tiles to be binned to one tile list in X direction */
+       MALI_DLBU_REGISTER_START_TILE_POS         = 0x0014, /**< Start tile positions;
+                                                             31:24 start position in Y direction for group 1
+                                                             23:16 start position in X direction for group 1
+                                                             15:8 start position in Y direction for group 0
+                                                             7:0 start position in X direction for group 0 */
+       MALI_DLBU_REGISTER_PP_ENABLE_MASK         = 0x0018, /**< PP enable mask;
+                                                             7 enable PP7 for load balancing
+                                                             6 enable PP6 for load balancing
+                                                             5 enable PP5 for load balancing
+                                                             4 enable PP4 for load balancing
+                                                             3 enable PP3 for load balancing
+                                                             2 enable PP2 for load balancing
+                                                             1 enable PP1 for load balancing
+                                                             0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum {
+       PP0ENABLE = 0,
+       PP1ENABLE,
+       PP2ENABLE,
+       PP3ENABLE,
+       PP4ENABLE,
+       PP5ENABLE,
+       PP6ENABLE,
+       PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+       struct mali_hw_core     hw_core;           /**< Common for all HW cores */
+       u32                     pp_cores_mask;     /**< This is a mask for the PP cores whose operation will be controlled by the DLBU;
+                                                      see the MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+       if (_MALI_OSK_ERR_OK ==
+           mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+                                   &mali_dlbu_cpu_addr)) {
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+       if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+               mali_mmu_release_table_page(mali_dlbu_phys_addr,
+                                           mali_dlbu_cpu_addr);
+               mali_dlbu_phys_addr = 0;
+               mali_dlbu_cpu_addr = 0;
+       }
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
+{
+       struct mali_dlbu_core *core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+       if (NULL != core) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+                       core->pp_cores_mask = 0;
+                       if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+                               return core;
+                       }
+                       MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+       }
+
+       return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       mali_hw_core_delete(&dlbu->hw_core);
+       _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+       u32 dlbu_registers[7];
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+       dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+       dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+       dlbu_registers[2] = 0;
+       dlbu_registers[3] = 0;
+       dlbu_registers[4] = 0;
+       dlbu_registers[5] = 0;
+       dlbu_registers[6] = dlbu->pp_cores_mask;
+
+       /* write reset values to core registers */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+       err = _MALI_OSK_ERR_OK;
+
+       return err;
+}
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+       mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 bcast_id;
+
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       pp_core = mali_group_get_pp_core(group);
+       bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+       dlbu->pp_cores_mask |= bcast_id;
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+       struct mali_pp_core *pp_core;
+       u32 bcast_id;
+
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       pp_core = mali_group_get_pp_core(group);
+       bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+       dlbu->pp_cores_mask &= ~bcast_id;
+       MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+       u32 *registers;
+       MALI_DEBUG_ASSERT(job);
+       registers = mali_pp_job_get_dlbu_registers(job);
+       MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+       /* Writing 4 registers:
+        * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+       mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+}
diff --git a/utgard/r8p0/common/mali_dlbu.h b/utgard/r8p0/common/mali_dlbu.h
new file mode 100755 (executable)
index 0000000..c031f11
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+struct mali_dlbu_core;
+
+extern mali_dma_addr mali_dlbu_phys_addr;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
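+
+/* Illustrative usage (not part of the original header): after changing group
+ * membership, the new mask must be pushed to hardware, e.g.
+ *
+ *     mali_dlbu_add_group(dlbu, group);
+ *     mali_dlbu_update_mask(dlbu);   (or implicitly via mali_dlbu_reset())
+ */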
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/utgard/r8p0/common/mali_dvfs_policy.c b/utgard/r8p0/common/mali_dvfs_policy.c
new file mode 100755 (executable)
index 0000000..1094f9d
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
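+/* Worked example: MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90)
+ * = (int)(90 * 256 / 100.0 + 0.5) = (int)230.9 = 230 on the 0..256 scale. */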
+
+/** The max FPS; defaults to the display vsync rate of 60. Can be set as a module insert parameter. */
+int mali_max_system_fps = 60;
+/** The lower limit on the desired FPS; defaults to 58. Can be set as a module insert parameter. */
+int mali_desired_fps = 58;
+
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
+static int clock_step = -1;
+static int cur_clk_step = -1;
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Platform callbacks used to query and set the GPU frequency */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND  1000000000ULL
+static u32 calculate_window_render_fps(u64 time_period)
+{
+       u32 max_window_number;
+       u64 tmp;
+       u64 max = time_period;
+       u32 leading_zeroes;
+       u32 shift_val;
+       u32 time_period_shift;
+       u32 max_window_number_shift;
+       u32 ret_val;
+
+       max_window_number = mali_session_max_window_num();
+
+       /* To avoid float division, extend the dividend to ns unit */
+       tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+       if (tmp > time_period) {
+               max = tmp;
+       }
+
+       /*
+        * The dividend, the divisor or both may be 64-bit values.
+        * To avoid depending on a 64-bit divider, we first shift both
+        * values down equally so they fit in 32 bits.
+        */
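+       /* Example: for max = 2^40 the top word is 0x100, _mali_osk_clz()
+        * returns 23, shift_val becomes 9 and both operands fit in 32 bits
+        * after the shift. */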
+       leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+       shift_val = 32 - leading_zeroes;
+
+       time_period_shift = (u32)(time_period >> shift_val);
+       max_window_number_shift = (u32)(tmp >> shift_val);
+
+       ret_val = max_window_number_shift / time_period_shift;
+
+       return ret_val;
+}
+
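+/* Worked example (illustrative): with available steps {100, 200, 400} MHz and
+ * target_clock_mhz = 250, the loop below stops at the 400 MHz step;
+ * pick_clock_up selects 400 MHz, otherwise the next step down (200 MHz) is
+ * chosen. A target above 400 MHz is clamped to the top step. */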
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+       int i = 0;
+       bool clock_changed = false;
+
+       /* Round up to the closest available frequency step for target_clock_mhz */
+       for (i = 0; i < gpu_clk->num_of_steps; i++) {
+               /* Find the first item > target_clock_mhz */
+               if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+                       break;
+               }
+       }
+
+       /* If the target clock is greater than the maximum clock, just pick the maximum one */
+       if (i == gpu_clk->num_of_steps) {
+               i = gpu_clk->num_of_steps - 1;
+       } else {
+               if ((!pick_clock_up) && (i > 0)) {
+                       i = i - 1;
+               }
+       }
+
+       clock_step = i;
+       if (cur_clk_step != clock_step) {
+               clock_changed = true;
+       }
+
+       return clock_changed;
+}
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+       int under_perform_boundary_value = 0;
+       int over_perform_boundary_value = 0;
+       int current_fps = 0;
+       int current_gpu_util = 0;
+       bool clock_changed = false;
+       u32 window_render_fps;
+#if CLOCK_TUNING_TIME_DEBUG
+       struct timeval start;
+       struct timeval stop;
+       unsigned int elapse_time;
+       do_gettimeofday(&start);
+#endif
+
+       if (NULL == gpu_clk) {
+               MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. \n"));
+               return;
+       }
+
+       window_render_fps = calculate_window_render_fps(time_period);
+
+       current_fps = window_render_fps;
+       current_gpu_util = data->utilization_gpu;
+
+       /* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+       if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+       } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+       } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+       } else {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+       }
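+       /* In short: near the desired FPS the policy is conservative (90%/70%
+        * of full utilization), while lower FPS bands trigger scaling more
+        * eagerly (55%/35% and 70%/50%). */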
+
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d,  over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d,  pressure render fps = %d \n", current_fps, window_render_fps));
+
+       /* Get current clock value */
+       cur_clk_step = mali_gpu_get_freq();
+
+       /* Consider off-screen rendering (no window render FPS measured) */
+       if (0 == current_fps) {
+               /* GP or PP under perform, need to give full power */
+               if (current_gpu_util > over_perform_boundary_value) {
+                       if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+                               clock_changed = true;
+                               clock_step = gpu_clk->num_of_steps - 1;
+                       }
+               }
+
+               /* If GPU is idle, use lowest power */
+               if (0 == current_gpu_util) {
+                       if (cur_clk_step != 0) {
+                               clock_changed = true;
+                               clock_step = 0;
+                       }
+               }
+
+               goto real_setting;
+       }
+
+       /* Calculate target clock if the GPU clock can be tuned */
+       if (-1 != cur_clk_step) {
+               int target_clk_mhz = -1;
+               mali_bool pick_clock_up = MALI_TRUE;
+
+               if (current_gpu_util > under_perform_boundary_value) {
+                       /* Under-performing: scale the target by utilization and the FPS deficit */
+                       target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+                       pick_clock_up = MALI_TRUE;
+               } else if (current_gpu_util < over_perform_boundary_value) {
+                       /* Over-performing: no need to factor in FPS; the system is not trying to reach the desired FPS */
+                       target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+                       pick_clock_up = MALI_FALSE;
+               }
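+               /* Illustrative numbers for the under-perform case: at 400 MHz
+                * with utilization 240/256, desired FPS 58, measured FPS 30
+                * and an under-perform boundary of 141, the target is
+                * 400 * 240 * 58 / 141 / 30 = 1316 MHz (integer division),
+                * which mali_pickup_closest_avail_clock() clamps to the
+                * highest available step. */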
+
+               if (-1 != target_clk_mhz) {
+                       clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+               }
+       }
+
+real_setting:
+       if (clock_changed) {
+               mali_gpu_set_freq(clock_step);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                             gpu_clk->item[clock_step].clock,
+                                             gpu_clk->item[clock_step].vol / 1000,
+                                             0, 0, 0);
+       }
+
+#if CLOCK_TUNING_TIME_DEBUG
+       do_gettimeofday(&stop);
+
+       elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+       MALI_DEBUG_PRINT(2, ("Using ARM power policy:  eclapse time = %d\n", elapse_time));
+#endif
+}
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+       _mali_osk_device_data data;
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+                       MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n"));
+
+                       mali_fps_step1 = mali_max_system_fps / 3;
+                       mali_fps_step2 = mali_max_system_fps / 5;
+
+                       data.get_clock_info(&gpu_clk);
+
+                       if (gpu_clk != NULL) {
+#ifdef DEBUG
+                               int i;
+                               for (i = 0; i < gpu_clk->num_of_steps; i++) {
+                                       MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+                                                            i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+                               }
+#endif
+                       } else {
+                               MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n"));
+                       }
+
+                       mali_gpu_get_freq = data.get_freq;
+                       mali_gpu_set_freq = data.set_freq;
+
+                       if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+                           && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+                               mali_dvfs_enabled = MALI_TRUE;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+               }
+       } else {
+               err = _MALI_OSK_ERR_FAULT;
+               MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n"));
+       }
+
+       return err;
+}
+
+/*
+ * For performance, always give full power at the start of a new period
+ * when Mali DVFS is enabled. Callers must only invoke this when
+ * mali_dvfs_policy_enabled() returns MALI_TRUE, since gpu_clk and the
+ * frequency callbacks are otherwise NULL.
+ */
+void mali_dvfs_policy_new_period(void)
+{
+       /* Always give full power when starting a new period */
+       unsigned int cur_clk_step = 0;
+
+       cur_clk_step = mali_gpu_get_freq();
+
+       if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+               mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+                                             gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+       }
+}
+
+mali_bool mali_dvfs_policy_enabled(void)
+{
+       return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+       if (mali_platform_device != NULL) {
+
+               struct mali_gpu_device_data *device_data = NULL;
+               device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+               if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+                       int cur_clk_step = device_data->get_freq();
+                       struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+                       device_data->get_clock_info(&mali_gpu_clk);
+                       clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+                       clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+               }
+       }
+}
+#endif
+
diff --git a/utgard/r8p0/common/mali_dvfs_policy.h b/utgard/r8p0/common/mali_dvfs_policy.h
new file mode 100755 (executable)
index 0000000..1770426
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/* __MALI_DVFS_POLICY_H__ */
diff --git a/utgard/r8p0/common/mali_executor.c b/utgard/r8p0/common/mali_executor.c
new file mode 100755 (executable)
index 0000000..0359c77
--- /dev/null
@@ -0,0 +1,2697 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+#include "mali_osk_mali.h"
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+       EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+       EXEC_STATE_DISABLED,    /* Disabled by core scaling (do not use) */
+       EXEC_STATE_EMPTY,       /* No child groups for virtual group (do not use) */
+       EXEC_STATE_INACTIVE,    /* Can be used, but must be activated first */
+       EXEC_STATE_IDLE,        /* Active and ready to be used */
+       EXEC_STATE_WORKING,     /* Executing a job */
+};
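+
+/* Typical lifecycle of a usable group, as implemented in this file:
+ * EXEC_STATE_INACTIVE -> EXEC_STATE_IDLE on power up/activation,
+ * EXEC_STATE_IDLE -> EXEC_STATE_WORKING when a job is scheduled, and back to
+ * IDLE (or INACTIVE on suspend) when the job completes. */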
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but must be activated first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* PP cores whose enabling is delayed because some PP cores haven't finished disabling yet. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to notify userspace of PP core changes once core scaling
+ * has finished in mali_executor_complete_group(). */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
+static void mali_executor_complete_group(struct mali_group *group,
+               mali_bool success,
+               struct mali_gp_job **gp_job_done,
+               struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *old_list,
+               u32 *old_count,
+               _mali_osk_list_t *new_list,
+               u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+               enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *new_list,
+               u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+       mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+       if (NULL == mali_executor_lock_obj) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+       if (NULL == executor_wq_high_pri) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_working_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == executor_working_wait_queue) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+       if (NULL == executor_wq_notify_core_change) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == executor_notify_core_change_wait_queue) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
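+
+/* Note: the error paths above rely on mali_executor_terminate() being safe to
+ * call on a partially initialized module; every teardown step below is
+ * NULL-guarded, so resources that were never created are simply skipped. */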
+
+void mali_executor_terminate(void)
+{
+       if (NULL != executor_notify_core_change_wait_queue) {
+               _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+               executor_notify_core_change_wait_queue = NULL;
+       }
+
+       if (NULL != executor_wq_notify_core_change) {
+               _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+               executor_wq_notify_core_change = NULL;
+       }
+
+       if (NULL != executor_working_wait_queue) {
+               _mali_osk_wait_queue_term(executor_working_wait_queue);
+               executor_working_wait_queue = NULL;
+       }
+
+       if (NULL != executor_wq_high_pri) {
+               _mali_osk_wq_delete_work(executor_wq_high_pri);
+               executor_wq_high_pri = NULL;
+       }
+
+       if (NULL != mali_executor_lock_obj) {
+               _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+               mali_executor_lock_obj = NULL;
+       }
+}
+
+void mali_executor_populate(void)
+{
+       u32 num_groups;
+       u32 i;
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       /* Do we have a virtual group? */
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (mali_group_is_virtual(group)) {
+                       virtual_group = group;
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+                       break;
+               }
+       }
+
+       /* Find all the available physical GP and PP cores */
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+                       if (!mali_group_is_virtual(group)) {
+                               if (NULL != pp_core) {
+                                       if (0 == pp_version) {
+                                               /* Retrieve PP version from the first available PP core */
+                                               pp_version = mali_pp_core_get_version(pp_core);
+                                       }
+
+                                       if (NULL != virtual_group) {
+                                               mali_executor_lock();
+                                               mali_group_add_group(virtual_group, group);
+                                               mali_executor_unlock();
+                                       } else {
+                                               _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+                                               group_list_inactive_count++;
+                                       }
+
+                                       num_physical_pp_cores_total++;
+                               } else {
+                                       MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+                                       if (0 == gp_version) {
+                                               /* Retrieve GP version */
+                                               gp_version = mali_gp_core_get_version(gp_core);
+                                       }
+
+                                       gp_group = group;
+                                       gp_group_state = EXEC_STATE_INACTIVE;
+                               }
+
+                       }
+               }
+       }
+
+       num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
+
+void mali_executor_depopulate(void)
+{
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+       if (NULL != gp_group) {
+               mali_group_delete(gp_group);
+               gp_group = NULL;
+       }
+
+       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+       if (NULL != virtual_group) {
+               mali_group_delete(virtual_group);
+               virtual_group = NULL;
+       }
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+}
+
+void mali_executor_suspend(void)
+{
+       mali_executor_lock();
+
+       /* Increment the pause_count so that no more jobs will be scheduled */
+       pause_count++;
+
+       mali_executor_unlock();
+
+       _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+                                       mali_executor_is_suspended, NULL);
+
+       /*
+        * mali_executor_complete_XX() leaves groups in the idle state.
+        * The deactivate option is used when we are going to power down
+        * the entire GPU (OS suspend) and want a consistent SW vs HW
+        * state.
+        */
+       mali_executor_lock();
+
+       mali_executor_deactivate_list_idle(MALI_TRUE);
+
+       /*
+        * The following steps deactivate all activated
+        * (MALI_GROUP_STATE_ACTIVE) and activating
+        * (MALI_GROUP_STATE_ACTIVATION_PENDING) groups, to make sure
+        * the variable pd_mask_wanted ends up equal to 0.
+        */
+       if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+               gp_group_state = EXEC_STATE_INACTIVE;
+               mali_group_deactivate(gp_group);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               if (MALI_GROUP_STATE_INACTIVE
+                   != mali_group_get_state(virtual_group)) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+                       mali_group_deactivate(virtual_group);
+               }
+       }
+
+       if (0 < group_list_inactive_count) {
+               struct mali_group *group;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                           &group_list_inactive,
+                                           struct mali_group, executor_list) {
+                       if (MALI_GROUP_STATE_ACTIVATION_PENDING
+                           == mali_group_get_state(group)) {
+                               mali_group_deactivate(group);
+                       }
+
+                       /*
+                        * On Mali-450 platforms a physical group may sit on the
+                        * inactive list in MALI_GROUP_STATE_ACTIVATION_PENDING
+                        * state. Deactivating it is not enough; it must also be
+                        * added back to the virtual group. The virtual group is
+                        * guaranteed to be INACTIVE at this point, so it is safe
+                        * to add the physical group to it now.
+                        */
+                       if (NULL != virtual_group) {
+                               _mali_osk_list_delinit(&group->executor_list);
+                               group_list_inactive_count--;
+
+                               mali_group_add_group(virtual_group, group);
+                       }
+               }
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+       mali_executor_lock();
+
+       /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       pause_count--;
+       if (0 == pause_count) {
+               mali_executor_schedule();
+       }
+
+       mali_executor_unlock();
+}
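+
+/* Suspend/resume calls nest: each mali_executor_suspend() increments
+ * pause_count and each mali_executor_resume() decrements it; scheduling only
+ * restarts once the count drops back to zero. */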
+
+u32 mali_executor_get_num_cores_total(void)
+{
+       return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+       return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+       MALI_DEBUG_ASSERT_POINTER(virtual_group);
+       MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+       return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+       return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+       struct mali_group *group;
+       struct mali_group *temp;
+       mali_bool ret;
+
+       mali_executor_lock();
+
+       /*
+        * This function is a bit complicated because
+        * mali_group_zap_session() can fail. That only happens when the
+        * group has an unhandled page fault pending.
+        * We need to make sure this page fault is handled before we
+        * return, so that we know every outstanding MMU transaction has
+        * completed. This allows the caller to safely remove physical
+        * pages once we have returned.
+        */
+
+       MALI_DEBUG_ASSERT(NULL != gp_group);
+       ret = mali_group_zap_session(gp_group, session);
+       if (MALI_FALSE == ret) {
+               struct mali_gp_job *gp_job = NULL;
+
+               mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+               MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+               /* GP job completed, make sure it is freed */
+               mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               ret = mali_group_zap_session(virtual_group, session);
+               if (MALI_FALSE == ret) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_TRUE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+                                   struct mali_group, executor_list) {
+               ret = mali_group_zap_session(group, session);
+               if (MALI_FALSE == ret) {
+                       ret = mali_group_zap_session(group, session);
+                       if (MALI_FALSE == ret) {
+                               struct mali_pp_job *pp_job = NULL;
+
+                               mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+                               if (NULL != pp_job) {
+                                       /* PP job completed, free it */
+                                       mali_scheduler_complete_pp_job(pp_job,
+                                                                      0, MALI_TRUE,
+                                                                      MALI_TRUE);
+                               }
+                       }
+               }
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+       if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+               if (MALI_TRUE == deferred_schedule) {
+                       _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+               } else {
+                       /* Schedule from this thread*/
+                       mali_executor_lock();
+                       mali_executor_schedule();
+                       mali_executor_unlock();
+               }
+       }
+}
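+
+/* The deferred path is for callers that may be in atomic context (see the
+ * MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE note at the top of this file);
+ * scheduling is then pushed to the high-priority work queue instead of being
+ * done inline. */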
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+       mali_bool time_out = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+       if (mali_group_has_timed_out(group)) {
+               int_result = MALI_INTERRUPT_RESULT_ERROR;
+               time_out = MALI_TRUE;
+               MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+                           mali_gp_job_get_id(group->gp_running_job),
+                           mali_group_core_description(group)));
+       } else {
+               int_result = mali_group_get_interrupt_result_gp(group);
+               if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+       mali_group_mask_all_interrupts_gp(group);
+
+       if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+               if (mali_group_gp_is_active(group)) {
+                       /* Only VS completed so far, while PLBU is still active */
+
+                       /* Enable all but the current interrupt */
+                       mali_group_enable_interrupts_gp(group, int_result);
+
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_OK;
+               }
+       } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+               if (mali_group_gp_is_active(group)) {
+                       /* Only PLBU completed so far, while VS is still active */
+
+                       /* Enable all but the current interrupt */
+                       mali_group_enable_interrupts_gp(group, int_result);
+
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_OK;
+               }
+       } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+               struct mali_gp_job *job = mali_group_get_running_gp_job(group);
+
+               /* PLBU out of mem */
+               MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /* Give group a chance to generate a SUSPEND event */
+               mali_group_oom(group);
+#endif
+
+               /*
+                * no need to hold interrupt raised while
+                * waiting for more memory.
+                */
+               mali_executor_send_gp_oom_to_user(job);
+
+               mali_executor_unlock();
+
+               return _MALI_OSK_ERR_OK;
+       }
+
+       /* We should now have a real interrupt to handle */
+
+       MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+                            mali_group_core_description(group),
+                            (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+                            "ERROR" : "success"));
+
+       if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+               /* Don't bother to do processing of errors in upper half */
+               mali_executor_unlock();
+
+               if (MALI_FALSE == time_out) {
+                       mali_group_schedule_bottom_half_gp(group);
+               }
+       } else {
+               struct mali_gp_job *job;
+               mali_bool success;
+
+               if (MALI_TRUE == time_out) {
+                       mali_group_dump_status(group);
+               }
+
+               success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+                         MALI_TRUE : MALI_FALSE;
+
+               mali_executor_complete_group(group, success, &job, NULL);
+
+               mali_executor_unlock();
+
+               /* GP jobs always fully complete */
+               MALI_DEBUG_ASSERT(NULL != job);
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_gp_job(job, success,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+       mali_bool time_out = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (in_upper_half) {
+               if (mali_group_is_in_virtual(group)) {
+                       /* Child groups should never handle PP interrupts */
+                       MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+       MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+       if (mali_group_has_timed_out(group)) {
+               int_result = MALI_INTERRUPT_RESULT_ERROR;
+               time_out = MALI_TRUE;
+               MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+                           mali_pp_job_get_id(group->pp_running_job),
+                           mali_group_core_description(group)));
+       } else {
+               int_result = mali_group_get_interrupt_result_pp(group);
+               if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+               if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+                       /* Some child groups are still working, so nothing to do right now */
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+       /* We should now have a real interrupt to handle */
+
+       MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+                            mali_group_core_description(group),
+                            (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+                            "ERROR" : "success"));
+
+       if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+               /* Don't bother to do processing of errors in upper half */
+               mali_group_mask_all_interrupts_pp(group);
+               mali_executor_unlock();
+
+               if (MALI_FALSE == time_out) {
+                       mali_group_schedule_bottom_half_pp(group);
+               }
+       } else {
+               struct mali_pp_job *job = NULL;
+               mali_bool success;
+
+               if (MALI_TRUE == time_out) {
+                       mali_group_dump_status(group);
+               }
+
+               success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+                         MALI_TRUE : MALI_FALSE;
+
+               mali_executor_complete_group(group, success, NULL, &job);
+
+               mali_executor_unlock();
+
+               if (NULL != job) {
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(job,
+                                                      num_physical_pp_cores_total,
+                                                      MALI_TRUE, MALI_TRUE);
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+
+       MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+       int_result = mali_group_get_interrupt_result_mmu(group);
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+       /* We should now have a real interrupt to handle */
+
+       if (in_upper_half) {
+               /* Don't bother to do processing of errors in upper half */
+
+               struct mali_group *parent = group->parent_group;
+
+               mali_mmu_mask_all_interrupts(group->mmu);
+
+               mali_executor_unlock();
+
+               if (NULL == parent) {
+                       mali_group_schedule_bottom_half_mmu(group);
+               } else {
+                       mali_group_schedule_bottom_half_mmu(parent);
+               }
+
+       } else {
+               struct mali_gp_job *gp_job = NULL;
+               struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+               u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+               u32 status = mali_mmu_get_status(group->mmu);
+               MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
+                                    (void *)(uintptr_t)fault_address,
+                                    (status >> 6) & 0x1F,
+                                    (status & 32) ? "write" : "read",
+                                    group->mmu->hw_core.description));
+               MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+                                    mali_mmu_get_rawstat(group->mmu), status));
+               mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address);
+#endif
+
+               mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job);
+
+               mali_executor_unlock();
+
+               if (NULL != gp_job) {
+                       MALI_DEBUG_ASSERT(NULL == pp_job);
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                                      MALI_TRUE, MALI_TRUE);
+               } else if (NULL != pp_job) {
+                       MALI_DEBUG_ASSERT(NULL == gp_job);
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(pp_job,
+                                                      num_physical_pp_cores_total,
+                                                      MALI_TRUE, MALI_TRUE);
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+       u32 i;
+       mali_bool child_groups_activated = MALI_FALSE;
+       mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+       u32 num_activated = 0;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(groups);
+       MALI_DEBUG_ASSERT(0 < num_groups);
+
+       mali_executor_lock();
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+       for (i = 0; i < num_groups; i++) {
+               MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+               mali_group_power_up(groups[i]);
+
+               if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+                    (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
+                       /* nothing more to do for this group */
+                       continue;
+               }
+
+               MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+               num_activated++;
+#endif
+
+               if (mali_group_is_in_virtual(groups[i])) {
+                       /*
+                        * At least one child group of virtual group is powered on.
+                        */
+                       child_groups_activated = MALI_TRUE;
+               } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+                       /* Set gp and pp not in virtual to active. */
+                       mali_group_set_active(groups[i]);
+               }
+
+               /* Move group from inactive to idle list */
+               if (groups[i] == gp_group) {
+                       MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+                                         gp_group_state);
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+                          && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+                       MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+                                         EXEC_STATE_INACTIVE));
+
+                       mali_executor_change_state_pp_physical(groups[i],
+                                                              &group_list_inactive,
+                                                              &group_list_inactive_count,
+                                                              &group_list_idle,
+                                                              &group_list_idle_count);
+               }
+
+               do_schedule = MALI_TRUE;
+       }
+
+       if (mali_executor_has_virtual_group() &&
+           MALI_TRUE == child_groups_activated &&
+           MALI_GROUP_STATE_ACTIVATION_PENDING ==
+           mali_group_get_state(virtual_group)) {
+               /*
+                * Try to activate the virtual group. This may not succeed
+                * every time, because not all child groups are necessarily
+                * powered on at the same time while the virtual group is in
+                * the activation pending state.
+                */
+               if (mali_group_set_active(virtual_group)) {
+                       /* Move group from inactive to idle */
+                       MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+                                         virtual_group_state);
+                       virtual_group_state = EXEC_STATE_IDLE;
+
+                       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u  physical activated, 1 virtual activated.\n", num_groups, num_activated));
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+               }
+       } else {
+               MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+       }
+
+       if (MALI_TRUE == do_schedule) {
+               /* Trigger a schedule */
+               mali_executor_schedule();
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+                                   u32 num_groups)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(groups);
+       MALI_DEBUG_ASSERT(0 < num_groups);
+
+       mali_executor_lock();
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+       for (i = 0; i < num_groups; i++) {
+               /* Groups must be either disabled or inactive, while a virtual
+                * group may also be in the empty state: on pm_runtime_suspend
+                * the virtual group can be powered off, and because we must
+                * release mali_pm_state_lock before acquiring
+                * mali_executor_lock, a newly queued physical job may have
+                * pulled all physical groups out of the virtual group. We can
+                * therefore only power down an empty virtual group; those
+                * physical groups will be powered up again in the following
+                * pm_runtime_resume callback.
+                */
+               MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+                                 EXEC_STATE_DISABLED) ||
+                                 mali_executor_group_is_in_state(groups[i],
+                                                 EXEC_STATE_INACTIVE) ||
+                                 mali_executor_group_is_in_state(groups[i],
+                                                 EXEC_STATE_EMPTY));
+
+               MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+               mali_group_power_down(groups[i]);
+       }
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+       mali_executor_unlock();
+}
+
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+       struct mali_group *group;
+       struct mali_group *tmp_group;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3,
+                        ("Executor: Aborting all jobs from session 0x%08X.\n",
+                         session));
+
+       mali_executor_lock();
+
+       if (mali_group_get_session(gp_group) == session) {
+               if (EXEC_STATE_WORKING == gp_group_state) {
+                       struct mali_gp_job *gp_job = NULL;
+
+                       mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+                       MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+                       /* GP job completed, make sure it is freed */
+                       mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                                      MALI_TRUE, MALI_TRUE);
+               } else {
+                       /* Same session, but not working, so just clear it */
+                       mali_group_clear_session(gp_group);
+               }
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               if (EXEC_STATE_WORKING == virtual_group_state
+                   && mali_group_get_session(virtual_group) == session) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_TRUE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+                                   struct mali_group, executor_list) {
+               if (mali_group_get_session(group) == session) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_TRUE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+       /* PS: Core scaling is by default enabled */
+       core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+       core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+       return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+
+       if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+           && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+               mali_executor_group_enable_internal(group);
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, disable it immediately.
+ * If the group is a child of the virtual group and the virtual group is
+ * idle, disable the given physical group within it.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+
+       if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+           && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+               mali_executor_group_disable_internal(group);
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+       /* NB: This function is not optimized for time critical usage */
+
+       mali_bool ret;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+       ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+       mali_executor_unlock();
+
+       return ret;
+}
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+       if (target_core_nr == num_physical_pp_cores_enabled) return 0;
+       if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+       if (target_core_nr > num_physical_pp_cores_total) return -EINVAL;
+       if (0 == target_core_nr) return -EINVAL;
+
+       mali_executor_core_scale(target_core_nr);
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+       return 0;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       mali_executor_lock();
+
+       switch (gp_group_state) {
+       case EXEC_STATE_INACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state INACTIVE\n");
+               break;
+       case EXEC_STATE_IDLE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state IDLE\n");
+               break;
+       case EXEC_STATE_WORKING:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state WORKING\n");
+               break;
+       default:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in unknown/illegal state %u\n",
+                                       gp_group_state);
+               break;
+       }
+
+       n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in WORKING state (count = %u):\n",
+                               group_list_working_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in IDLE state (count = %u):\n",
+                               group_list_idle_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in INACTIVE state (count = %u):\n",
+                               group_list_inactive_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in DISABLED state (count = %u):\n",
+                               group_list_disabled_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               switch (virtual_group_state) {
+               case EXEC_STATE_EMPTY:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state EMPTY\n");
+                       break;
+               case EXEC_STATE_INACTIVE:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state INACTIVE\n");
+                       break;
+               case EXEC_STATE_IDLE:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state IDLE\n");
+                       break;
+               case EXEC_STATE_WORKING:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state WORKING\n");
+                       break;
+               default:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in unknown/illegal state %u\n",
+                                               virtual_group_state);
+                       break;
+               }
+
+               n += mali_group_dump_state(virtual_group, buf + n, size - n);
+       }
+
+       mali_executor_unlock();
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->number_of_total_cores = num_physical_pp_cores_total;
+       args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->version = pp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->number_of_cores = 1;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->version = gp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+               _mali_osk_notification_t *new_notification = NULL;
+
+               new_notification = _mali_osk_notification_create(
+                                          _MALI_NOTIFICATION_GP_STALLED,
+                                          sizeof(_mali_uk_gp_job_suspended_s));
+
+               if (NULL != new_notification) {
+                       MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+                                            args->cookie, args->arguments[0], args->arguments[1]));
+
+                       mali_executor_lock();
+
+                       /* Resume the job in question if it is still running */
+                       job = mali_group_get_running_gp_job(gp_group);
+                       if (NULL != job &&
+                           args->cookie == mali_gp_job_get_id(job) &&
+                           session == mali_gp_job_get_session(job)) {
+                               /*
+                                * Correct job is running, resume with new heap
+                                */
+
+                               mali_gp_job_set_oom_notification(job,
+                                                                new_notification);
+
+                               /* This will also re-enable interrupts */
+                               mali_group_resume_gp_with_new_heap(gp_group,
+                                                                  args->cookie,
+                                                                  args->arguments[0],
+                                                                  args->arguments[1]);
+
+                               mali_executor_unlock();
+                               return _MALI_OSK_ERR_OK;
+                       } else {
+                               MALI_DEBUG_PRINT(2, ("Executor: Unable to resume GP job because of a GP timeout or another unexpected reason!\n"));
+
+                               _mali_osk_notification_delete(new_notification);
+
+                               mali_executor_unlock();
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+               }
+       } else {
+               MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+       }
+
+       mali_executor_lock();
+
+       /* Abort the job in question if it is still running */
+       job = mali_group_get_running_gp_job(gp_group);
+       if (NULL != job &&
+           args->cookie == mali_gp_job_get_id(job) &&
+           session == mali_gp_job_get_session(job)) {
+               /* Correct job is still running */
+               struct mali_gp_job *job_done = NULL;
+
+               mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL);
+
+               /* The same job should have completed */
+               MALI_DEBUG_ASSERT(job_done == job);
+
+               /* GP job completed, make sure it is freed */
+               mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       mali_executor_unlock();
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+void mali_executor_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+       MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+void mali_executor_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+       _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
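+/*
+ * Predicate callback (data is unused): returns MALI_TRUE once the executor
+ * is paused and no group is working, i.e. the suspend has fully taken effect.
+ */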
+static mali_bool mali_executor_is_suspended(void *data)
+{
+       mali_bool ret;
+
+       /* This callback does not use the data pointer. */
+       MALI_IGNORE(data);
+
+       mali_executor_lock();
+
+       ret = pause_count > 0 && !mali_executor_is_working();
+
+       mali_executor_unlock();
+
+       return ret;
+}
+
+static mali_bool mali_executor_is_working(void)
+{
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       return (0 != group_list_working_count ||
+               EXEC_STATE_WORKING == gp_group_state ||
+               EXEC_STATE_WORKING == virtual_group_state);
+}
+
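+/* Mark the virtual group as EMPTY once its last child group has been removed. */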
+static void mali_executor_disable_empty_virtual(void)
+{
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+       MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+       if (mali_group_is_empty(virtual_group)) {
+               virtual_group_state = EXEC_STATE_EMPTY;
+       }
+}
+
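+/*
+ * Return a physical group to the virtual group after its job has completed,
+ * first bringing the child's power state in line with the virtual group's
+ * state. Returns MALI_TRUE if the caller needs to trigger a PM update.
+ */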
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+       mali_bool trigger_pm_update = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       /* Only rejoining after job has completed (still active) */
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+                         mali_group_get_state(group));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+       MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+       /* Make sure group and virtual group have same status */
+
+       if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+               if (mali_group_deactivate(group)) {
+                       trigger_pm_update = MALI_TRUE;
+               }
+
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+               }
+       } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+                  mali_group_get_state(virtual_group)) {
+               /*
+                * Activation is pending for virtual group, leave
+                * this child group as active.
+                */
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+                                 mali_group_get_state(virtual_group));
+
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_IDLE;
+               }
+       }
+
+       /* Remove group from idle list */
+       MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+                         EXEC_STATE_IDLE));
+       _mali_osk_list_delinit(&group->executor_list);
+       group_list_idle_count--;
+
+       /*
+        * And finally rejoin the virtual group. The group will start working
+        * on the same job as virtual_group, if virtual_group is currently
+        * working on one.
+        */
+       mali_group_add_group(virtual_group, group);
+
+       return trigger_pm_update;
+}
+
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return ((EXEC_STATE_INACTIVE == virtual_group_state ||
+               EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
+              MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
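+/*
+ * GP-bound heuristic (Mali-400 only): if physical groups are already working
+ * and the next physical PP job is large and unstarted, return MALI_TRUE so
+ * the caller holds the job back rather than starving the GP.
+ */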
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+       struct mali_pp_job *job;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       job = mali_scheduler_job_pp_physical_peek();
+
+       if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+               if (0 < group_list_working_count &&
+                   mali_pp_job_is_large_and_unstarted(job)) {
+                       return MALI_TRUE;
+               }
+       }
+
+       return MALI_FALSE;
+}
+
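+/*
+ * Protected (secure mode) PP jobs may not run alongside non-protected GP/PP
+ * work, and vice versa. Returns MALI_TRUE if scheduling must early out for
+ * that reason, and sets *gpu_secure_mode_is_needed when the next PP job to
+ * start is a protected one.
+ */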
+static mali_bool mali_executor_schedule_is_early_out(mali_bool *gpu_secure_mode_is_needed)
+{
+       struct mali_pp_job *next_pp_job_to_start = NULL;
+       struct mali_group *group;
+       struct mali_group *tmp_group;
+       struct mali_pp_job *physical_pp_job_working = NULL;
+       struct mali_pp_job *virtual_pp_job_working = NULL;
+       mali_bool gpu_working_in_protected_mode = MALI_FALSE;
+       mali_bool gpu_working_in_non_protected_mode = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       *gpu_secure_mode_is_needed = MALI_FALSE;
+
+       /* Check whether GPU secure mode is supported; exit early if not. */
+       if (MALI_FALSE == _mali_osk_gpu_secure_mode_is_supported()) {
+               return MALI_FALSE;
+       }
+
+       /* Check whether GPU secure mode needs to be set for the next PP job;
+        * get the next PP job that will be scheduled, if one exists.
+        */
+       next_pp_job_to_start = mali_scheduler_job_pp_next();
+
+       /* Check whether the currently running physical/virtual PP job, if any, is a protected job. */
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+                                   struct mali_group, executor_list) {
+               physical_pp_job_working = group->pp_running_job;
+               break;
+       }
+
+       if (EXEC_STATE_WORKING == virtual_group_state) {
+               virtual_pp_job_working = virtual_group->pp_running_job;
+       }
+
+       if (NULL != physical_pp_job_working) {
+               if (MALI_TRUE == mali_pp_job_is_protected_job(physical_pp_job_working)) {
+                       gpu_working_in_protected_mode = MALI_TRUE;
+               } else {
+                       gpu_working_in_non_protected_mode = MALI_TRUE;
+               }
+       } else if (NULL != virtual_pp_job_working) {
+               if (MALI_TRUE == mali_pp_job_is_protected_job(virtual_pp_job_working)) {
+                       gpu_working_in_protected_mode = MALI_TRUE;
+               } else {
+                       gpu_working_in_non_protected_mode = MALI_TRUE;
+               }
+       } else if (EXEC_STATE_WORKING == gp_group_state) {
+               gpu_working_in_non_protected_mode = MALI_TRUE;
+       }
+
+       /* If the next PP job is a protected job. */
+       if ((NULL != next_pp_job_to_start) && MALI_TRUE == mali_pp_job_is_protected_job(next_pp_job_to_start)) {
+               /* If the GP or any non-protected PP job is working now, the protected PP job cannot be scheduled. */
+               if (MALI_TRUE == gpu_working_in_non_protected_mode)
+                       return MALI_TRUE;
+
+               *gpu_secure_mode_is_needed = MALI_TRUE;
+               return MALI_FALSE;
+
+       }
+
+       if (MALI_TRUE == gpu_working_in_protected_mode) {
+               /* Non-protected PP/GP jobs cannot be scheduled while protected PP jobs are running. */
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */
+static void mali_executor_schedule(void)
+{
+       u32 i;
+       u32 num_physical_needed = 0;
+       u32 num_physical_to_process = 0;
+       mali_bool trigger_pm_update = MALI_FALSE;
+       mali_bool deactivate_idle_group = MALI_TRUE;
+       mali_bool gpu_secure_mode_is_needed = MALI_FALSE;
+       mali_bool is_gpu_secure_mode = MALI_FALSE;
+       /* Physical groups + jobs to start in this function */
+       struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       int num_jobs_to_start = 0;
+
+       /* Virtual job to start in this function */
+       struct mali_pp_job *virtual_job_to_start = NULL;
+
+       /* GP job to start in this function */
+       struct mali_gp_job *gp_job_to_start = NULL;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (pause_count > 0) {
+               /* Execution is suspended, don't schedule any jobs. */
+               return;
+       }
+
+       /* Lock needed in order to safely handle the job queues */
+       mali_scheduler_lock();
+
+       /* 1. Check whether scheduling should early out. */
+       if (MALI_TRUE == mali_executor_schedule_is_early_out(&gpu_secure_mode_is_needed)) {
+               mali_scheduler_unlock();
+               return;
+       }
+
+       /* 2. Activate the GP group first if a GP job is queued. */
+       if ((EXEC_STATE_INACTIVE == gp_group_state)
+           && (0 < mali_scheduler_job_gp_count())
+           && (gpu_secure_mode_is_needed == MALI_FALSE)) {
+
+               enum mali_group_state state =
+                       mali_group_activate(gp_group);
+               if (MALI_GROUP_STATE_ACTIVE == state) {
+                       /* Set GP group state to idle */
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else {
+                       trigger_pm_update = MALI_TRUE;
+               }
+       }
+
+       /* 3. Prepare as many physical groups as needed/possible */
+
+       num_physical_needed = mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed);
+
+       /* On Mali-450 platforms this block does not need to be entered frequently. */
+       if (0 < num_physical_needed) {
+
+               if (num_physical_needed <= group_list_idle_count) {
+                       /* We have enough groups on idle list already */
+                       num_physical_to_process = num_physical_needed;
+                       num_physical_needed = 0;
+               } else {
+                       /* We need to get a hold of some more groups */
+                       num_physical_to_process = group_list_idle_count;
+                       num_physical_needed -= group_list_idle_count;
+               }
+
+               if (0 < num_physical_needed) {
+
+                       /* 3.1. Activate groups which are inactive */
+
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+                                                   struct mali_group, executor_list) {
+                               enum mali_group_state state =
+                                       mali_group_activate(group);
+                               if (MALI_GROUP_STATE_ACTIVE == state) {
+                                       /* Move from inactive to idle */
+                                       mali_executor_change_state_pp_physical(group,
+                                                                              &group_list_inactive,
+                                                                              &group_list_inactive_count,
+                                                                              &group_list_idle,
+                                                                              &group_list_idle_count);
+                                       num_physical_to_process++;
+                               } else {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+
+                               num_physical_needed--;
+                               if (0 == num_physical_needed) {
+                                       /* We have activated all the groups we need */
+                                       break;
+                               }
+                       }
+               }
+
+               if (mali_executor_virtual_group_is_usable()) {
+
+                       /*
+                        * 3.2. And finally, steal and activate groups
+                        * from virtual group if we need even more
+                        */
+                       while (0 < num_physical_needed) {
+                               struct mali_group *group;
+
+                               group = mali_group_acquire_group(virtual_group);
+                               if (NULL != group) {
+                                       enum mali_group_state state;
+
+                                       mali_executor_disable_empty_virtual();
+
+                                       state = mali_group_activate(group);
+                                       if (MALI_GROUP_STATE_ACTIVE == state) {
+                                               /* Group is ready, add to idle list */
+                                               _mali_osk_list_add(
+                                                       &group->executor_list,
+                                                       &group_list_idle);
+                                               group_list_idle_count++;
+                                               num_physical_to_process++;
+                                       } else {
+                                               /*
+                                                * Group is not ready yet,
+                                                * add to inactive list
+                                                */
+                                               _mali_osk_list_add(
+                                                       &group->executor_list,
+                                                       &group_list_inactive);
+                                               group_list_inactive_count++;
+
+                                               trigger_pm_update = MALI_TRUE;
+                                       }
+                                       num_physical_needed--;
+                               } else {
+                                       /*
+                                        * We could not get enough groups
+                                        * from the virtual group.
+                                        */
+                                       break;
+                               }
+                       }
+               }
+
+               /* 3.3. Assign physical jobs to groups */
+
+               if (0 < num_physical_to_process) {
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               struct mali_pp_job *job = NULL;
+                               u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+                               MALI_DEBUG_ASSERT(num_jobs_to_start <
+                                                 MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+                               MALI_DEBUG_ASSERT(0 <
+                                                 mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed));
+
+                               /* If the next pp job is non-protected, check if gp bound now. */
+                               if ((MALI_FALSE == gpu_secure_mode_is_needed)
+                                   && (mali_executor_hint_is_enabled(MALI_EXECUTOR_HINT_GP_BOUND))
+                                   && (MALI_TRUE == mali_executor_tackle_gp_bound())) {
+                                       /*
+                                        * We're gp bound,
+                                        * don't start this right now.
+                                        */
+                                       deactivate_idle_group = MALI_FALSE;
+                                       num_physical_to_process = 0;
+                                       break;
+                               }
+
+                               job = mali_scheduler_job_pp_physical_get(
+                                             &sub_job);
+
+                               if (MALI_FALSE == gpu_secure_mode_is_needed) {
+                                       MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_protected_job(job));
+                               } else {
+                                       MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_protected_job(job));
+                               }
+
+                               MALI_DEBUG_ASSERT_POINTER(job);
+                               MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+                               /* Put job + group on list of jobs to start later on */
+
+                               groups_to_start[num_jobs_to_start] = group;
+                               jobs_to_start[num_jobs_to_start] = job;
+                               sub_jobs_to_start[num_jobs_to_start] = sub_job;
+                               num_jobs_to_start++;
+
+                               /* Move group from idle to working */
+                               mali_executor_change_state_pp_physical(group,
+                                                                      &group_list_idle,
+                                                                      &group_list_idle_count,
+                                                                      &group_list_working,
+                                                                      &group_list_working_count);
+
+                               num_physical_to_process--;
+                               if (0 == num_physical_to_process) {
+                                       /* Got all we needed */
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       /* 4. Deactivate idle PP groups. This must be done before activating
+        *    the virtual group, to cover the case where the normal queue
+        *    initially holds only a physical job while the group is inactive:
+        *    the job start is deferred until the group is activated, and the
+        *    scheduler is called again. If a high-priority virtual job has
+        *    arrived by then, scheduling would otherwise do nothing because
+        *    the executor schedule had stopped.
+        */
+
+       if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+                       && (!mali_timeline_has_physical_pp_job()))) {
+               trigger_pm_update = MALI_TRUE;
+       }
+
+       /* 5. Activate virtual group, if needed */
+       if (EXEC_STATE_INACTIVE == virtual_group_state &&
+           MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+               struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+               if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+                   || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+                       enum mali_group_state state =
+                               mali_group_activate(virtual_group);
+                       if (MALI_GROUP_STATE_ACTIVE == state) {
+                               /* Set virtual group state to idle */
+                               virtual_group_state = EXEC_STATE_IDLE;
+                       } else {
+                               trigger_pm_update = MALI_TRUE;
+                       }
+               }
+       }
+
+       /* 6. To power up groups ASAP, trigger a PM update only when there is no need to switch the GPU mode. */
+
+       is_gpu_secure_mode = _mali_osk_gpu_secure_mode_is_enabled();
+
+       if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == is_gpu_secure_mode)
+           || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == is_gpu_secure_mode)) {
+               if (MALI_TRUE == trigger_pm_update) {
+                       trigger_pm_update = MALI_FALSE;
+                       mali_pm_update_async();
+               }
+       }
+
+       /* 7. Assign jobs to idle virtual group (or deactivate if no job) */
+
+       if (EXEC_STATE_IDLE == virtual_group_state) {
+               if (MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+                       struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+                       if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+                           || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+                               virtual_job_to_start =
+                                       mali_scheduler_job_pp_virtual_get();
+                               virtual_group_state = EXEC_STATE_WORKING;
+                       }
+               } else if (!mali_timeline_has_virtual_pp_job()) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+
+                       if (mali_group_deactivate(virtual_group)) {
+                               trigger_pm_update = MALI_TRUE;
+                       }
+               }
+       }
+
+       /* 8. Assign job to idle GP group (or deactivate if no job) */
+
+       if (EXEC_STATE_IDLE == gp_group_state && MALI_FALSE == gpu_secure_mode_is_needed) {
+               if (0 < mali_scheduler_job_gp_count()) {
+                       gp_job_to_start = mali_scheduler_job_gp_get();
+                       gp_group_state = EXEC_STATE_WORKING;
+               } else if (!mali_timeline_has_gp_job()) {
+                       gp_group_state = EXEC_STATE_INACTIVE;
+                       if (mali_group_deactivate(gp_group)) {
+                               trigger_pm_update = MALI_TRUE;
+                       }
+               }
+       }
+
+       /* 9. We no longer need the schedule/queue lock */
+
+       mali_scheduler_unlock();
+
+       /* 10. start jobs */
+       if (NULL != virtual_job_to_start) {
+               MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+               mali_group_start_pp_job(virtual_group,
+                                       virtual_job_to_start, 0, is_gpu_secure_mode);
+       }
+
+       for (i = 0; i < num_jobs_to_start; i++) {
+               MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+                                         groups_to_start[i]));
+               mali_group_start_pp_job(groups_to_start[i],
+                                       jobs_to_start[i],
+                                       sub_jobs_to_start[i], is_gpu_secure_mode);
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+       if (NULL != gp_job_to_start) {
+               MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+               mali_group_start_gp_job(gp_group, gp_job_to_start, is_gpu_secure_mode);
+       }
+
+       /* 11. Trigger any pending PM updates */
+       if (MALI_TRUE == trigger_pm_update) {
+               mali_pm_update_async();
+       }
+}
+
+/* Handler for deferred schedule requests */
+static void mali_executor_wq_schedule(void *arg)
+{
+       MALI_IGNORE(arg);
+       mali_executor_lock();
+       mali_executor_schedule();
+       mali_executor_unlock();
+}
+
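+/*
+ * Forward a GP out-of-memory event (_MALI_NOTIFICATION_GP_STALLED) to the
+ * owning session. The job id is stored in gp_returned_cookie so that the
+ * suspend response from user space can be verified later.
+ */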
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
+{
+       _mali_uk_gp_job_suspended_s *jobres;
+       _mali_osk_notification_t *notification;
+
+       notification = mali_gp_job_get_oom_notification(job);
+
+       /*
+        * Remember the id we send to user space, so we have something to
+        * verify when we get a response
+        */
+       gp_returned_cookie = mali_gp_job_get_id(job);
+
+       jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       jobres->cookie = gp_returned_cookie;
+
+       mali_session_send_notification(mali_gp_job_get_session(job),
+                                      notification);
+}
+
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+               mali_bool success)
+{
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* Extract the needed HW status from the core and reset it */
+       job = mali_group_complete_gp(group, success);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Core is now ready to go into idle list */
+       gp_group_state = EXEC_STATE_IDLE;
+
+       /* This will potentially queue more GP and PP jobs */
+       mali_timeline_tracker_release(&job->tracker);
+
+       /* Signal PP job */
+       mali_gp_job_signal_pp_tracker(job, success);
+
+       return job;
+}
+
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+               mali_bool success)
+{
+       struct mali_pp_job *job;
+       u32 sub_job;
+       mali_bool job_is_done;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* Extract the needed HW status from the core and reset it */
+       job = mali_group_complete_pp(group, success, &sub_job);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Core is now ready to go into idle list */
+       if (mali_group_is_virtual(group)) {
+               virtual_group_state = EXEC_STATE_IDLE;
+       } else {
+               /* Move from working to idle state */
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_working,
+                                                      &group_list_working_count,
+                                                      &group_list_idle,
+                                                      &group_list_idle_count);
+       }
+
+       /* By this point it is the executor module that owns the job itself */
+       mali_pp_job_mark_sub_job_completed(job, success);
+       job_is_done = mali_pp_job_is_complete(job);
+
+       if (job_is_done) {
+               /* This will potentially queue more GP and PP jobs */
+               mali_timeline_tracker_release(&job->tracker);
+       }
+
+       return job;
+}
+
+static void mali_executor_complete_group(struct mali_group *group,
+               mali_bool success,
+               struct mali_gp_job **gp_job_done,
+               struct mali_pp_job **pp_job_done)
+{
+       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+       struct mali_gp_job *gp_job = NULL;
+       struct mali_pp_job *pp_job = NULL;
+       mali_bool pp_job_is_done = MALI_TRUE;
+
+       if (NULL != gp_core) {
+               gp_job = mali_executor_complete_gp(group, success);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(pp_core);
+               MALI_IGNORE(pp_core);
+               pp_job = mali_executor_complete_pp(group, success);
+
+               pp_job_is_done = mali_pp_job_is_complete(pp_job);
+       }
+
+       if (pause_count > 0) {
+               /* Execution has been suspended */
+
+               if (!mali_executor_is_working()) {
+                       /* Last job completed, wake up sleepers */
+                       _mali_osk_wait_queue_wake_up(
+                               executor_working_wait_queue);
+               }
+       } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+               mali_executor_core_scale_in_group_complete(group);
+
+               mali_executor_schedule();
+       } else {
+               /* try to schedule new jobs */
+               mali_executor_schedule();
+       }
+
+       if (NULL != gp_job) {
+               MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+               *gp_job_done = gp_job;
+       } else if (pp_job_is_done) {
+               MALI_DEBUG_ASSERT_POINTER(pp_job);
+               MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+               *pp_job_done = pp_job;
+       }
+}
+
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *old_list,
+               u32 *old_count,
+               _mali_osk_list_t *new_list,
+               u32 *new_count)
+{
+       /*
+        * It's a bit more complicated to change the state for the physical PP
+        * groups since their state is determined by the list they are on.
+        */
+#if defined(DEBUG)
+       mali_bool found = MALI_FALSE;
+       struct mali_group *group_iter;
+       struct mali_group *temp;
+       u32 old_counted = 0;
+       u32 new_counted = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(old_list);
+       MALI_DEBUG_ASSERT_POINTER(old_count);
+       MALI_DEBUG_ASSERT_POINTER(new_list);
+       MALI_DEBUG_ASSERT_POINTER(new_count);
+
+       /*
+        * Verify that group is present on old list,
+        * and that the count is correct
+        */
+
+       _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+                                   struct mali_group, executor_list) {
+               old_counted++;
+               if (group == group_iter) {
+                       found = MALI_TRUE;
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+                                   struct mali_group, executor_list) {
+               new_counted++;
+       }
+
+       if (MALI_FALSE == found) {
+               if (old_list == &group_list_idle) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+               } else if (old_list == &group_list_inactive) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+               } else if (old_list == &group_list_working) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+               } else if (old_list == &group_list_disabled) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
+               }
+
+               if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+                       MALI_DEBUG_PRINT(1, (" group in working \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+                       MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+                       MALI_DEBUG_PRINT(1, (" group in idle \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+                       MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+               }
+       }
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == found);
+       MALI_DEBUG_ASSERT(0 < (*old_count));
+       MALI_DEBUG_ASSERT((*old_count) == old_counted);
+       MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+       _mali_osk_list_move(&group->executor_list, new_list);
+       (*old_count)--;
+       (*new_count)++;
+}
+
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *new_list,
+               u32 *new_count)
+{
+       _mali_osk_list_add(&group->executor_list, new_list);
+       (*new_count)++;
+}
+
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+               enum mali_executor_state_t state)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (gp_group == group) {
+               if (gp_group_state == state) {
+                       return MALI_TRUE;
+               }
+       } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+               if (virtual_group_state == state) {
+                       return MALI_TRUE;
+               }
+       } else {
+               /* Physical PP group */
+               struct mali_group *group_iter;
+               struct mali_group *temp;
+               _mali_osk_list_t *list;
+
+               if (EXEC_STATE_DISABLED == state) {
+                       list = &group_list_disabled;
+               } else if (EXEC_STATE_INACTIVE == state) {
+                       list = &group_list_inactive;
+               } else if (EXEC_STATE_IDLE == state) {
+                       list = &group_list_idle;
+               } else {
+                       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+                       list = &group_list_working;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+                                           struct mali_group, executor_list) {
+                       if (group_iter == group) {
+                               return MALI_TRUE;
+                       }
+               }
+       }
+
+       /* group not in correct state */
+       return MALI_FALSE;
+}
+
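+/*
+ * Move a disabled group back into service: put it in the inactive state,
+ * activate it, and if activation completes immediately promote it to idle
+ * (rejoining the virtual group where one exists). Otherwise a PM update
+ * is triggered.
+ */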
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+       /* Put into inactive state (== "lowest" enabled state) */
+       if (group == gp_group) {
+               MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+               gp_group_state = EXEC_STATE_INACTIVE;
+       } else {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count,
+                                                      &group_list_inactive,
+                                                      &group_list_inactive_count);
+
+               ++num_physical_pp_cores_enabled;
+               MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+       }
+
+       if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+               /* Move from inactive to idle */
+               if (group == gp_group) {
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else {
+                       mali_executor_change_state_pp_physical(group,
+                                                              &group_list_inactive,
+                                                              &group_list_inactive_count,
+                                                              &group_list_idle,
+                                                              &group_list_idle_count);
+
+                       if (mali_executor_has_virtual_group()) {
+                               if (mali_executor_physical_rejoin_virtual(group)) {
+                                       mali_pm_update_async();
+                               }
+                       }
+               }
+       } else {
+               mali_pm_update_async();
+       }
+}
+
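+/*
+ * Disable a group right away when it is not working. A working group only
+ * gets a disable request flag set and is disabled later, when it completes,
+ * via mali_executor_core_scale_in_group_complete().
+ */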
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+       mali_bool working;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+       working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+       if (MALI_TRUE == working) {
+               /* The group will be disabled once it completes its current work;
+                * when the virtual group completes, its child groups are also
+                * checked for this flag. */
+               mali_group_set_disable_request(group, MALI_TRUE);
+               return;
+       }
+
+       /* Put into disabled state */
+       if (group == gp_group) {
+               /* GP group */
+               MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+               gp_group_state = EXEC_STATE_DISABLED;
+       } else {
+               if (mali_group_is_in_virtual(group)) {
+                       /* A child group of virtual group. move the specific group from virtual group */
+                       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+                       mali_executor_set_state_pp_physical(group,
+                                                           &group_list_disabled,
+                                                           &group_list_disabled_count);
+
+                       mali_group_remove_group(virtual_group, group);
+                       mali_executor_disable_empty_virtual();
+               } else {
+                       mali_executor_change_group_status_disabled(group);
+               }
+
+               --num_physical_pp_cores_enabled;
+               MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+       }
+
+       if (MALI_GROUP_STATE_INACTIVE != group->state) {
+               if (MALI_TRUE == mali_group_deactivate(group)) {
+                       mali_pm_update_async();
+               }
+       }
+}
+
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+       mali_bool done = MALI_FALSE;
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               return;
+       }
+
+       /*
+        * This function gets a bit complicated because we can't hold the session lock while
+        * allocating notification objects.
+        */
+       while (!done) {
+               u32 i;
+               u32 num_sessions_alloc;
+               u32 num_sessions_with_lock;
+               u32 used_notification_objects = 0;
+               _mali_osk_notification_t **notobjs;
+
+               /* Pre-allocate the notification objects we need right now (the count might change after the lock has been taken) */
+               num_sessions_alloc = mali_session_get_count();
+               if (0 == num_sessions_alloc) {
+                       /* No sessions to report to */
+                       return;
+               }
+
+               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+               if (NULL == notobjs) {
+                       MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+                       /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+                       return;
+               }
+
+               for (i = 0; i < num_sessions_alloc; i++) {
+                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+                       if (NULL != notobjs[i]) {
+                               _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+                               data->number_of_enabled_cores = num_cores;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+                       }
+               }
+
+               mali_session_lock();
+
+               /* number of sessions will not change while we hold the lock */
+               num_sessions_with_lock = mali_session_get_count();
+
+               if (num_sessions_alloc >= num_sessions_with_lock) {
+                       /* We have allocated enough notification objects for all current sessions */
+                       struct mali_session_data *session, *tmp;
+                       MALI_SESSION_FOREACH(session, tmp, link) {
+                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+                               if (NULL != notobjs[used_notification_objects]) {
+                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
+                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+                               }
+                               used_notification_objects++;
+                       }
+                       done = MALI_TRUE;
+               }
+
+               mali_session_unlock();
+
+               /* Delete any remaining/unused notification objects */
+               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+                       if (NULL != notobjs[used_notification_objects]) {
+                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
+                       }
+               }
+
+               _mali_osk_free(notobjs);
+       }
+}
+
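+/*
+ * Wait queue predicate: core scaling is done once no PP group still has a
+ * pending disable request.
+ */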
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+       u32 i;
+       u32 num_groups;
+       mali_bool ret = MALI_TRUE;
+
+       MALI_IGNORE(data);
+
+       mali_executor_lock();
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+                               ret = MALI_FALSE;
+                               break;
+                       }
+               }
+       }
+       mali_executor_unlock();
+
+       return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+       MALI_IGNORE(arg);
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               return;
+       }
+
+       _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+                                       mali_executor_core_scaling_is_done, NULL);
+
+       mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+       u32 i;
+       u32 num_groups;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       group->disable_requested = MALI_FALSE;
+               }
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               core_scaling_delay_up_mask[i] = 0;
+       }
+}
+
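+/*
+ * Core scaling: compute the best power-cost domain masks for the current and
+ * the target number of cores, diff them per domain, then disable cores in
+ * domains with a negative delta and enable cores in domains with a positive
+ * delta. Enables that cannot be applied yet (because cores pending disable
+ * are still working) are parked in core_scaling_delay_up_mask.
+ */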
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+       int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       int i;
+
+       MALI_DEBUG_ASSERT(0 < target_core_nr);
+       MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+       mali_executor_lock();
+
+       if (target_core_nr < num_physical_pp_cores_enabled) {
+               MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+       } else {
+               MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+       }
+
+       /* When a new core scaling request comes in, remove the not-yet-applied
+        * part of the last core scaling request. This is safe because everything
+        * is protected by a single lock (the executor lock). */
+       mali_executor_core_scaling_reset();
+
+       mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+       mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+               MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 > target_core_scaling_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+                                           && (!mali_group_is_virtual(group))) {
+                                               mali_executor_group_disable_internal(group);
+                                               target_core_scaling_mask[i]++;
+                                               if (0 == target_core_scaling_mask[i]) {
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               /*
+                * If target_core_scaling_mask[i] is greater than 0, some PP
+                * cores in the domain with index i need to be enabled.
+                */
+               if (0 < target_core_scaling_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       if (num_physical_pp_cores_enabled >= target_core_nr) {
+                               break;
+                       }
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+                                           && (!mali_group_is_virtual(group))) {
+                                               mali_executor_group_enable_internal(group);
+                                               target_core_scaling_mask[i]--;
+
+                                               if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /**
+        * At this point some PP cores may still not have been enabled, because
+        * the cores they are meant to replace are still in the working state.
+        */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 < target_core_scaling_mask[i]) {
+                       core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+               }
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+}
+
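+/*
+ * Called when a group with a pending disable request has finished its
+ * job: perform the deferred disable (including the children of a
+ * virtual group), enable up to the same number of cores recorded in
+ * core_scaling_delay_up_mask, and wake up anyone waiting for the core
+ * count to change.
+ */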
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+       int num_pp_cores_disabled = 0;
+       int num_pp_cores_to_enable = 0;
+       int i;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+       /* Disable the child groups of a virtual group */
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       if (MALI_TRUE == mali_group_disable_requested(child)) {
+                               mali_group_set_disable_request(child, MALI_FALSE);
+                               mali_executor_group_disable_internal(child);
+                               num_pp_cores_disabled++;
+                       }
+               }
+               mali_group_set_disable_request(group, MALI_FALSE);
+       } else {
+               mali_executor_group_disable_internal(group);
+               mali_group_set_disable_request(group, MALI_FALSE);
+               if (NULL != mali_group_get_pp_core(group)) {
+                       num_pp_cores_disabled++;
+               }
+       }
+
+       num_pp_cores_to_enable = num_pp_cores_disabled;
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 < core_scaling_delay_up_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       if (0 == num_pp_cores_to_enable) {
+                               break;
+                       }
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *disabled_group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+                                               mali_executor_group_enable_internal(disabled_group);
+                                               core_scaling_delay_up_mask[i]--;
+                                               num_pp_cores_to_enable--;
+
+                                               if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
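+/*
+ * Move a physical PP group to the disabled list, coming from either the
+ * idle or the inactive list depending on its current state.
+ */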
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+       /* Physical PP group */
+       mali_bool idle;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+       if (MALI_TRUE == idle) {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_idle,
+                                                      &group_list_idle_count,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count);
+       } else {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_inactive,
+                                                      &group_list_inactive_count,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count);
+       }
+}
+
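+/*
+ * Returns MALI_TRUE if a PM update should be triggered. On Mali-450
+ * (where a virtual group exists) idle physical groups rejoin the virtual
+ * group; on Mali-300/400 they are deactivated and moved to the inactive
+ * list, but only when deactivate_idle_group is set.
+ */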
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+       mali_bool trigger_pm_update = MALI_FALSE;
+
+       if (group_list_idle_count > 0) {
+               if (mali_executor_has_virtual_group()) {
+
+                       /* Rejoin virtual group on Mali-450 */
+
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                                   &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               if (mali_executor_physical_rejoin_virtual(
+                                           group)) {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+                       }
+               } else if (deactivate_idle_group) {
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       /* Deactivate group on Mali-300/400 */
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                                   &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               if (mali_group_deactivate(group)) {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+
+                               /* Move from idle to inactive */
+                               mali_executor_change_state_pp_physical(group,
+                                                                      &group_list_idle,
+                                                                      &group_list_idle_count,
+                                                                      &group_list_inactive,
+                                                                      &group_list_inactive_count);
+                       }
+               }
+       }
+
+       return trigger_pm_update;
+}
+
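+/*
+ * Print the running status of the GP group, of all the physical PP group
+ * lists and, when present, of the virtual group and its children.
+ * For debugging only.
+ */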
+void mali_executor_running_status_print(void)
+{
+       struct mali_group *group = NULL;
+       struct mali_group *temp = NULL;
+
+       MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job));
+       if ((gp_group->gp_core) && (gp_group->is_working)) {
+               mali_group_dump_status(gp_group);
+       }
+       MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+               MALI_PRINT(("PP running job: %p, subjob %d\n", group->pp_running_job, group->pp_running_sub_job));
+               mali_group_dump_status(group);
+       }
+       MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+       MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+       MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job));
+               MALI_PRINT(("Virtual group status: %d\n", virtual_group_state));
+               MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state));
+               MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off"));
+               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list,
+                                           struct mali_group, group_list) {
+                       int i = 0;
+                       MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job));
+                       MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state));
+                       MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off"));
+#if MALI_STATE_TRACKING
+                       if (group->pm_domain) {
+                               MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain)));
+                               MALI_PRINT(("\tMask: 0x%04x\n", mali_pm_domain_get_mask(group->pm_domain)));
+                               MALI_PRINT(("\tUse-count: %u\n", mali_pm_domain_get_use_count(group->pm_domain)));
+                               MALI_PRINT(("\tCurrent power status: %s\n", (mali_pm_domain_get_mask(group->pm_domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
+                               MALI_PRINT(("\tWanted  power status: %s\n", (mali_pm_domain_get_mask(group->pm_domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
+                       }
+#endif
+
+                       for (i = 0; i < 2; i++) {
+                               if (NULL != group->l2_cache_core[i]) {
+                                       struct mali_pm_domain *domain;
+                                       domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]);
+                                       MALI_PRINT(("\tL2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off"));
+#if MALI_STATE_TRACKING
+                                       if (domain) {
+                                               MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain)));
+                                               MALI_PRINT(("\tL2 Mask: 0x%04x\n", mali_pm_domain_get_mask(domain)));
+                                               MALI_PRINT(("\tL2 Use-count: %u\n", mali_pm_domain_get_use_count(domain)));
+                                               MALI_PRINT(("\tL2 Current power status: %s\n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
+                                               MALI_PRINT(("\tL2 Wanted  power status: %s\n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
+                                       }
+#endif
+                               }
+                       }
+               }
+               if (EXEC_STATE_WORKING == virtual_group_state) {
+                       mali_group_dump_status(virtual_group);
+               }
+       }
+}
+
+void mali_executor_status_dump(void)
+{
+       mali_executor_lock();
+       mali_scheduler_lock();
+
+       /* print schedule queue status */
+       mali_scheduler_gp_pp_job_queue_print();
+
+       mali_scheduler_unlock();
+       mali_executor_unlock();
+}
diff --git a/utgard/r8p0/common/mali_executor.h b/utgard/r8p0/common/mali_executor.h
new file mode 100755 (executable)
index 0000000..1d69dc3
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012, 2014-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+       MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX        1
+} mali_executor_hint;
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       return mali_executor_hints[hint];
+}
+
+void mali_executor_running_status_print(void);
+void mali_executor_status_dump(void);
+void mali_executor_lock(void);
+void mali_executor_unlock(void);
+#endif /* __MALI_EXECUTOR_H__ */
diff --git a/utgard/r8p0/common/mali_gp.c b/utgard/r8p0/common/mali_gp.c
new file mode 100755 (executable)
index 0000000..60da8ac
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+#include <mali_platform.h>
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+       struct mali_gp_core *core = NULL;
+
+       MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+       MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+       core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+       if (NULL != core) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+                       _mali_osk_errcode_t ret;
+
+                       ret = mali_gp_reset(core);
+
+                       if (_MALI_OSK_ERR_OK == ret) {
+                               ret = mali_group_add_gp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_gp,
+                                                                      group,
+                                                                      mali_gp_irq_probe_trigger,
+                                                                      mali_gp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
+                                       if (NULL != core->irq) {
+                                               MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from %p to %p\n", mali_global_gp_core, core));
+                                               mali_global_gp_core = core;
+
+                                               return core;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+                                       }
+                                       mali_group_remove_gp_core(group);
+                               } else {
+                                       MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+       }
+
+       return NULL;
+}
+
+void mali_gp_delete(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+       mali_global_gp_core = NULL;
+       _mali_osk_free(core);
+}
+
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
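+/*
+ * Request the GP core to stop its bus, then poll the status register (up
+ * to MALI_REG_POLL_COUNT_SLOW reads) until the bus-stopped bit is set.
+ */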
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command. */
+       mali_gp_stop_bus(core);
+
+       /* Wait for bus to be stopped */
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
+               if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_SLOW == i) {
+               MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+               if (mali_gp_reset_fail < 65533)
+                       mali_gp_reset_fail++;
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
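+/*
+ * Hard reset of the GP core. A scratch register (PERF_CNT_0_LIMIT) is
+ * seeded with a dummy value before the reset command is issued; the core
+ * is considered reset once a write of the check value to that register
+ * can be read back. The register is then restored to its default value
+ * and interrupts are re-enabled.
+ */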
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+       const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       const u32 reset_default_value = 0;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+       int i;
+       u32 rawstat = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+               if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+                       break;
+               }
+       }
+
+       if (i == MALI_REG_POLL_COUNT_FAST) {
+               MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+                                 core->hw_core.description, rawstat));
+               if (mali_gp_reset_fail < 65533)
+                       mali_gp_reset_fail++;
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+       mali_gp_reset_async(core);
+       return mali_gp_reset_wait(core);
+}
+
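+/*
+ * Start a GP job on the core: write the frame registers, set up any
+ * requested performance counters, and issue the VS and/or PLBU start
+ * commands. Write memory barriers ensure the register writes are posted
+ * in order around the start command.
+ */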
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+       u32 startcmd = 0;
+       u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+       u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+       u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       if (mali_gp_job_has_vs_job(job)) {
+               startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+       }
+
+       if (mali_gp_job_has_plbu_job(job)) {
+               startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+       }
+
+       MALI_DEBUG_ASSERT(0 != startcmd);
+
+       mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (%p) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+       /* Barrier to make sure the previous register write is finished */
+       _mali_osk_write_mem_barrier();
+
+       /* This is the command that starts the core.
+        *
+        * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set; just
+        * force the core to assert the completion interrupt.
+        */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+       {
+               u32 bits = 0;
+
+               if (mali_gp_job_has_vs_job(job))
+                       bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+               if (mali_gp_job_has_plbu_job(job))
+                       bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+               mali_hw_core_register_write_relaxed(&core->hw_core,
+                                                   MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+       }
+#endif
+
+       /* Barrier to make sure the previous register write is finished */
+       _mali_osk_write_mem_barrier();
+}
+
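+/*
+ * Resume a GP job that stopped with a PLBU out-of-memory interrupt by
+ * handing it a new heap in [start_addr, end_addr). If the out-of-memory
+ * bit is no longer set, the core was reset in the meantime and the new
+ * heap is silently ignored.
+ */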
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+       u32 irq_readout;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+       if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+               MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+               _mali_osk_write_mem_barrier();
+       }
+       /*
+        * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+        * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+        */
+}
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+       return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
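+/*
+ * IRQ probing: the trigger handler raises a fake AXI bus error interrupt
+ * on the core, and the ack handler checks for it and clears it. Both are
+ * only used by _mali_osk_irq_init() when the interrupt line needs to be
+ * probed.
+ */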
+static void mali_gp_irq_probe_trigger(void *data)
+{
+       struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+       _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+       struct mali_gp_core *core = (struct mali_gp_core *)data;
+       u32 irq_readout;
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+       if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+               _mali_osk_mem_barrier();
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+       return n;
+}
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+       u32 val0 = 0;
+       u32 val1 = 0;
+       u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+       u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+               mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+               _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0);
+#endif
+
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+               mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+               _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1);
+#endif
+       }
+}
diff --git a/utgard/r8p0/common/mali_gp.h b/utgard/r8p0/common/mali_gp.h
new file mode 100755 (executable)
index 0000000..ecbe70e
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+       struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+       _mali_osk_irq_t     *irq;               /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
+
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
+{
+       return core->hw_core.description;
+}
+
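+/*
+ * Decode the masked interrupt status into a single result. Exact equality
+ * is used for the success cases so that any unexpected bit (bus error,
+ * hang, ...) falls through to MALI_INTERRUPT_RESULT_ERROR; only the
+ * out-of-memory bit is tested non-exclusively.
+ */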
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
+{
+       u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+                       MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+       if (0 == stat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+                   MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS;
+       } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+       } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+       } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+               return MALI_INTERRUPT_RESULT_OOM;
+       }
+
+       return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core,
+                                         MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core)
+{
+       u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+       return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
+{
+       /* Enable all interrupts, except those specified in exceptions */
+       u32 value;
+
+       if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+               /* Enable all used except VS complete */
+               value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+                       ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+       } else {
+               MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+                                 exceptions);
+               /* Enable all used except PLBU complete */
+               value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+                       ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+       }
+
+       mali_hw_core_register_write(&core->hw_core,
+                                   MALIGP2_REG_ADDR_MGMT_INT_MASK,
+                                   value);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/utgard/r8p0/common/mali_gp_job.c b/utgard/r8p0/common/mali_gp_job.c
new file mode 100755 (executable)
index 0000000..fb8dcd8
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_defer_bind.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job);
+
+
+static int _mali_gp_add_varying_allocations(struct mali_session_data *session,
+               struct mali_gp_job *job,
+               u32 *alloc,
+               u32 num)
+{
+       int i = 0;
+       struct mali_gp_allocation_node *alloc_node;
+       mali_mem_allocation *mali_alloc = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+
+       for (i = 0 ; i < num ; i++) {
+               MALI_DEBUG_ASSERT(alloc[i]);
+               alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node));
+               if (alloc_node) {
+                       INIT_LIST_HEAD(&alloc_node->node);
+                       /* Find the mali allocation structure by virtual address */
+                       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0);
+
+                       if (likely(mali_vma_node)) {
+                               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+                               MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
+                       } else {
+                               MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: can't find allocation %d by address 0x%x, num=%d\n", i, alloc[i], num));
+                               _mali_osk_free(alloc_node);
+                               goto fail;
+                       }
+                       alloc_node->alloc = mali_alloc;
+                       /* Add to the GP job's varying allocation list */
+                       list_move(&alloc_node->node, &job->varying_alloc);
+               } else
+                       goto fail;
+       }
+
+       return 0;
+fail:
+       MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: failed to allocate memory!\n"));
+       _mali_gp_del_varying_allocations(job);
+       return -1;
+}
+
+
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job)
+{
+       struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+       list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+               list_del(&alloc_node->node);
+               kfree(alloc_node);
+       }
+       INIT_LIST_HEAD(&job->varying_alloc);
+}
+
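+/*
+ * Create a GP job from the user-space arguments. For jobs that use
+ * deferred binding (deferred_mem_num > 0) the varying list is copied from
+ * user space, each allocation is prepared for defer-bind, and the varying
+ * memory is then bound up front to avoid memory latency issues while the
+ * job runs.
+ */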
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+       struct mali_gp_job *job;
+       u32 perf_counter_flag;
+       u32 __user *memory_list = NULL;
+       struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+       job = _mali_osk_calloc(1, sizeof(struct mali_gp_job));
+       if (NULL != job) {
+               job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+               if (NULL == job->finished_notification) {
+                       goto fail3;
+               }
+
+               job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+               if (NULL == job->oom_notification) {
+                       goto fail2;
+               }
+
+               if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+                       goto fail1;
+               }
+
+               perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+               /* If no counters came from user space,
+                * pass the debugfs / DS-5 provided global ones to the job object */
+               if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+                     (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+                       mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+                       mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+               }
+
+               _mali_osk_list_init(&job->list);
+               job->session = session;
+               job->id = id;
+               job->heap_current_addr = job->uargs.frame_registers[4];
+               job->perf_counter_value0 = 0;
+               job->perf_counter_value1 = 0;
+               job->pid = _mali_osk_get_pid();
+               job->tid = _mali_osk_get_tid();
+
+
+               INIT_LIST_HEAD(&job->varying_alloc);
+               INIT_LIST_HEAD(&job->vary_todo);
+               job->dmem = NULL;
+
+               if (job->uargs.deferred_mem_num > session->allocation_mgr.mali_allocation_num) {
+                       MALI_PRINT_ERROR(("Mali GP job: The number of varying buffers to defer-bind is invalid!\n"));
+                       goto fail1;
+               }
+
+               /* Build the varying allocation list */
+               if (job->uargs.deferred_mem_num > 0) {
+                       /* Copy the varying list from user space */
+                       job->varying_list = _mali_osk_calloc(1, sizeof(u32) * job->uargs.deferred_mem_num);
+                       if (!job->varying_list) {
+                               MALI_PRINT_ERROR(("Mali GP job: failed to allocate varying_list, deferred_mem_num = %d!\n", job->uargs.deferred_mem_num));
+                               goto fail1;
+                       }
+
+                       memory_list = (u32 __user *)(uintptr_t)job->uargs.deferred_mem_list;
+
+                       if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32) * job->uargs.deferred_mem_num)) {
+                               MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n"));
+                               goto fail;
+                       }
+
+                       if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list,
+                                       job->uargs.deferred_mem_num))) {
+                               MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n"));
+                               goto fail;
+                       }
+
+                       /* Do preparation for each allocation */
+                       list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+                               if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo, &job->required_varying_memsize))) {
+                                       MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n"));
+                                       goto fail;
+                               }
+                       }
+
+                       _mali_gp_del_varying_allocations(job);
+
+                       /* Bind varying memory here, to avoid memory latency issues later. */
+                       {
+                               struct mali_defer_mem_block dmem_block;
+
+                               INIT_LIST_HEAD(&dmem_block.free_pages);
+                               atomic_set(&dmem_block.num_free_pages, 0);
+
+                               if (mali_mem_prepare_mem_for_job(job, &dmem_block)) {
+                                       MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n"));
+                                       goto fail;
+                               }
+                               if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job, &dmem_block)) {
+                                       MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind failed (job %p)!\n", job));
+                                       goto fail;
+                               }
+                       }
+
+                       if (job->uargs.varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
+                               job->big_job = 1;
+                       }
+               }
+               job->pp_tracker = pp_tracker;
+               if (NULL != job->pp_tracker) {
+                       /* Take a reference on the PP job's tracker; it will be
+                        * released when the GP job is done. */
+                       mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+               }
+
+               mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+               mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+               return job;
+       } else {
+               MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n"));
+               return NULL;
+       }
+
+
+fail:
+       _mali_osk_free(job->varying_list);
+       /* Handle allocation failure here: free all varying nodes */
+       {
+               struct mali_backend_bind_list *bkn, *bkn_tmp;
+               list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+                       list_del(&bkn->node);
+                       _mali_osk_free(bkn);
+               }
+       }
+fail1:
+       _mali_osk_notification_delete(job->oom_notification);
+fail2:
+       _mali_osk_notification_delete(job->finished_notification);
+fail3:
+       _mali_osk_free(job);
+       return NULL;
+}
+
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+       struct mali_backend_bind_list *bkn, *bkn_tmp;
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       _mali_osk_free(job->varying_list);
+
+       /* Free any varying nodes still left on the defer-bind todo list */
+       list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+               list_del(&bkn->node);
+               _mali_osk_free(bkn);
+       }
+
+       mali_mem_defer_dmem_free(job);
+
+       /* de-allocate the pre-allocated oom notifications */
+       if (NULL != job->oom_notification) {
+               _mali_osk_notification_delete(job->oom_notification);
+               job->oom_notification = NULL;
+       }
+       if (NULL != job->finished_notification) {
+               _mali_osk_notification_delete(job->finished_notification);
+               job->finished_notification = NULL;
+       }
+
+       _mali_osk_free(job);
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+       struct mali_gp_job *iter;
+       struct mali_gp_job *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       /* Find position in list/queue where job should be added. */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+                                           struct mali_gp_job, list) {
+
+               /* A span is used to handle job ID wrapping. */
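+               /*
+                * With u32 job IDs, (a - b) < SPAN holds exactly when a is
+                * less than SPAN IDs ahead of b, even across a wrap-around:
+                * e.g. a = 2, b = 0xFFFFFFFF gives a - b = 3.
+                */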
+               bool job_is_after = (mali_gp_job_get_id(job) -
+                                    mali_gp_job_get_id(iter)) <
+                                   MALI_SCHEDULER_JOB_ID_SPAN;
+
+               if (job_is_after) {
+                       break;
+               }
+       }
+
+       _mali_osk_list_add(&job->list, &iter->list);
+}
+
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+       return gp_counter_src0;
+}
+
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+       gp_counter_src0 = counter;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+       return gp_counter_src1;
+}
+
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+       gp_counter_src1 = counter;
+}
+
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (NULL != job->pp_tracker) {
+               schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+               job->pp_tracker = NULL;
+       }
+
+       return schedule_mask;
+}
diff --git a/utgard/r8p0/common/mali_gp_job.h b/utgard/r8p0/common/mali_gp_job.h
new file mode 100755 (executable)
index 0000000..6a67543
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
+
+struct mali_defer_mem;
+/**
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems through-out
+ * its lifetime. Different part of the GP job struct is used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_gp_job {
+       /*
+        * These members are typically only set at creation,
+        * and only read later on.
+        * They do not require any lock protection.
+        */
+       _mali_uk_gp_start_job_s uargs;                     /**< Arguments from user space */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
+       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+       u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
+       struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+       struct mali_timeline_tracker *pp_tracker;          /**< Pointer to Timeline tracker for PP job that depends on this job. */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+
+       /*
+        * These members are used by the scheduler,
+        * protected by scheduler lock
+        */
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+
+       /*
+        * These members are used by the executor and/or group,
+        * protected by executor lock
+        */
+       _mali_osk_notification_t *oom_notification;        /**< Notification sent back to userspace on OOM */
+
+       /*
+        * Set by executor/group on job completion, read by scheduler when
+        * returning job to user. Hold executor lock when setting,
+        * no lock needed when reading
+        */
+       u32 heap_current_addr;                             /**< Holds the current HEAP address when the job has completed */
+       u32 perf_counter_value0;                           /**< Value of performance counter 0 (to be returned to user space) */
+       u32 perf_counter_value1;                           /**< Value of performance counter 1 (to be returned to user space) */
+       struct mali_defer_mem *dmem;                       /**< Used by defer-bind to store dmem info */
+       struct list_head varying_alloc;                    /**< Holds the list of varying allocations */
+       u32 bind_flag;                                     /**< Flag for defer-bind */
+       u32 *varying_list;                                 /**< List of varying memory that needs defer-bind */
+       struct list_head vary_todo;                        /**< List of backends that still need defer-bind */
+       u32 required_varying_memsize;                      /**< Size of varying memory to reallocate */
+       u32 big_job;                                       /**< Set if the GP job has large varying output and may take a long time */
+};
+
+#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0)
+#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2)
+
+struct mali_gp_allocation_node {
+       struct list_head node;
+       mali_mem_allocation *alloc;
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+               u32 cache_order)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (NULL == job) ? 0 : job->cache_order;
+}
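+
+/*
+ * Sketch of how the cache order is consumed when the job is started
+ * (this mirrors the call made from mali_group_start_gp_job() in
+ * mali_group.c):
+ *
+ *     mali_l2_cache_invalidate_conditional(group->l2_cache_core[0],
+ *             mali_gp_job_get_cache_order(job));
+ *
+ * The conditional variant lets the L2 be invalidated at most once per
+ * cache order rather than once per job.
+ */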
+
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       job->perf_counter_value1 = value;
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+               _mali_osk_list_t *list)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+       _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
+{
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+       notification = job->finished_notification;
+       job->finished_notification = NULL;
+
+       return notification;
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+       struct mali_gp_job *job)
+{
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+       notification = job->oom_notification;
+       job->oom_notification = NULL;
+
+       return notification;
+}
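+
+/*
+ * Both notification getters above transfer ownership to the caller and
+ * clear the job's pointer, so each notification can be handed back to
+ * user space exactly once. Illustrative use (assuming the session's
+ * ioctl_queue field):
+ *
+ *     _mali_osk_notification_t *n =
+ *             mali_gp_job_get_finished_notification(job);
+ *     _mali_osk_notification_queue_send(session->ioctl_queue, n);
+ */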
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+       struct mali_gp_job *job,
+       _mali_osk_notification_t *notification)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+       job->oom_notification = notification;
+}
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+       struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return &(job->tracker);
+}
+
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr(
+       struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/utgard/r8p0/common/mali_group.c b/utgard/r8p0/common/mali_group.c
new file mode 100755 (executable)
index 0000000..2878639
--- /dev/null
+++ b/utgard/r8p0/common/mali_group.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29))
+#include <mach/cpu.h>
+#endif
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_osk_mali.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#include "mali_executor.h"
+#include <mali_platform.h>
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* SW timer for job execution */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload);
+static void mali_group_recovery_reset(struct mali_group *group);
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+        struct mali_dlbu_core *dlbu,
+        struct mali_bcast_unit *bcast,
+        u32 domain_index)
+{
+    struct mali_group *group = NULL;
+
+    if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+        MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+        return NULL;
+    }
+
+    group = _mali_osk_calloc(1, sizeof(struct mali_group));
+    if (NULL != group) {
+        group->timeout_timer = _mali_osk_timer_init();
+        if (NULL != group->timeout_timer) {
+            _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+            group->l2_cache_core[0] = core;
+            _mali_osk_list_init(&group->group_list);
+            _mali_osk_list_init(&group->executor_list);
+            _mali_osk_list_init(&group->pm_domain_list);
+            group->bcast_core = bcast;
+            group->dlbu_core = dlbu;
+
+            /* register this object as a part of the correct power domain */
+            if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+                group->pm_domain = mali_pm_register_group(domain_index, group);
+
+            mali_global_groups[mali_global_num_groups] = group;
+            mali_global_num_groups++;
+
+            return group;
+        }
+        _mali_osk_free(group);
+    }
+
+    return NULL;
+}
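+
+/*
+ * Illustrative bring-up sequence (the actual resources come from the
+ * platform's resource table; the l2_cache, mmu, and gp names below are
+ * placeholders):
+ *
+ *     struct mali_group *group =
+ *             mali_group_create(l2_cache, NULL, NULL, domain_index);
+ *     if (NULL != group &&
+ *         _MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu) &&
+ *         _MALI_OSK_ERR_OK == mali_group_add_gp_core(group, gp)) {
+ *         // group is now ready to be managed by the executor
+ *     }
+ */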
+
+void mali_group_delete(struct mali_group *group)
+{
+    u32 i;
+
+    MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+                mali_group_core_description(group)));
+
+    MALI_DEBUG_ASSERT(NULL == group->parent_group);
+    MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+    /* Delete the resources that this group owns */
+    if (NULL != group->gp_core) {
+        mali_gp_delete(group->gp_core);
+    }
+
+    if (NULL != group->pp_core) {
+        mali_pp_delete(group->pp_core);
+    }
+
+    if (NULL != group->mmu) {
+        mali_mmu_delete(group->mmu);
+    }
+
+    if (mali_group_is_virtual(group)) {
+        /* Remove all groups from virtual group */
+        struct mali_group *child;
+        struct mali_group *temp;
+
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+            child->parent_group = NULL;
+            mali_group_delete(child);
+        }
+
+        mali_dlbu_delete(group->dlbu_core);
+
+        if (NULL != group->bcast_core) {
+            mali_bcast_unit_delete(group->bcast_core);
+        }
+    }
+
+    for (i = 0; i < mali_global_num_groups; i++) {
+        if (mali_global_groups[i] == group) {
+            mali_global_groups[i] = NULL;
+            mali_global_num_groups--;
+
+            if (i != mali_global_num_groups) {
+                /* We removed a group from the middle of the array -- move the last
+                 * group to the current position to close the gap */
+                mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+                mali_global_groups[mali_global_num_groups] = NULL;
+            }
+
+            break;
+        }
+    }
+
+    if (NULL != group->timeout_timer) {
+        _mali_osk_timer_del(group->timeout_timer);
+        _mali_osk_timer_term(group->timeout_timer);
+    }
+
+    if (NULL != group->bottom_half_work_mmu) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+    }
+
+    if (NULL != group->bottom_half_work_gp) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+    }
+
+    if (NULL != group->bottom_half_work_pp) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+    }
+
+    _mali_osk_free(group);
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
+{
+    /* This group object now owns the MMU core object */
+    group->mmu = mmu_core;
+    group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+    if (NULL == group->bottom_half_work_mmu) {
+        return _MALI_OSK_ERR_FAULT;
+    }
+    return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+    /* This group object no longer owns the MMU core object */
+    group->mmu = NULL;
+    if (NULL != group->bottom_half_work_mmu) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+    }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+    /* This group object now owns the GP core object */
+    group->gp_core = gp_core;
+    group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+    if (NULL == group->bottom_half_work_gp) {
+        return _MALI_OSK_ERR_FAULT;
+    }
+    return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+    /* This group object no longer owns the GP core object */
+    group->gp_core = NULL;
+    if (NULL != group->bottom_half_work_gp) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+    }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+    /* This group object now owns the PP core object */
+    group->pp_core = pp_core;
+    group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+    if (NULL == group->bottom_half_work_pp) {
+        return _MALI_OSK_ERR_FAULT;
+    }
+    return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+    /* This group object no longer owns the PP core object */
+    group->pp_core = NULL;
+    if (NULL != group->bottom_half_work_pp) {
+        _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+    }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group)
+{
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+                mali_group_core_description(group)));
+
+    if (MALI_GROUP_STATE_INACTIVE == group->state) {
+        /* Group is inactive, get PM refs in order to power up */
+
+        /*
+         * We'll take a maximum of 2 power domain references per group:
+         * one for the group itself, and one for its L2 cache.
+         */
+        struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+        struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+        u32 num_domains = 0;
+        mali_bool all_groups_on;
+
+        /* Deal with child groups first */
+        if (mali_group_is_virtual(group)) {
+            /*
+             * The virtual group might have 0, 1 or 2 L2s in
+             * its l2_cache_core array, but we ignore these and
+             * let the child groups take the needed L2 cache ref
+             * on behalf of the virtual group.
+             * In other words: the L2 refs are taken in pairs with
+             * the physical group which the L2 is attached to.
+             */
+            struct mali_group *child;
+            struct mali_group *temp;
+
+            /*
+             * Child group is inactive, get PM
+             * refs in order to power up.
+             */
+            _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                    &group->group_list,
+                    struct mali_group, group_list) {
+                MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+                        == child->state);
+
+                child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+                MALI_DEBUG_ASSERT_POINTER(
+                        child->pm_domain);
+                domains[num_domains] = child->pm_domain;
+                groups[num_domains] = child;
+                num_domains++;
+
+                /*
+                 * Take L2 domain ref for child group.
+                 */
+                MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+                        > num_domains);
+                domains[num_domains] = mali_l2_cache_get_pm_domain(
+                        child->l2_cache_core[0]);
+                groups[num_domains] = NULL;
+                MALI_DEBUG_ASSERT(NULL ==
+                        child->l2_cache_core[1]);
+                num_domains++;
+            }
+        } else {
+            /* Take L2 domain ref for physical groups. */
+            MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                    num_domains);
+
+            domains[num_domains] = mali_l2_cache_get_pm_domain(
+                    group->l2_cache_core[0]);
+            groups[num_domains] = NULL;
+            MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+            num_domains++;
+        }
+
+        /* Do the group itself last (its dependencies first) */
+
+        group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+        MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+        domains[num_domains] = group->pm_domain;
+        groups[num_domains] = group;
+        num_domains++;
+
+        all_groups_on = mali_pm_get_domain_refs(domains, groups,
+                num_domains);
+
+        /*
+         * Complete activation for group, include
+         * virtual group or physical group.
+         */
+        if (MALI_TRUE == all_groups_on) {
+
+            mali_group_set_active(group);
+        }
+    } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+        /* Already active */
+        MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+    } else {
+        /*
+         * Activation already pending; group->power_is_on could
+         * be either true or false. We need to wait for the power-up
+         * notification either way.
+         */
+        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+                == group->state);
+    }
+
+    MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+                mali_group_core_description(group),
+                MALI_GROUP_STATE_ACTIVE == group->state ?
+                "ACTIVE" : "PENDING"));
+
+    return group->state;
+}
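+
+/*
+ * Summary of the group power state machine driven by
+ * mali_group_activate(), mali_group_set_active() and
+ * mali_group_deactivate():
+ *
+ *     INACTIVE --activate--> ACTIVATION_PENDING --power-up--> ACTIVE
+ *         ^                                                      |
+ *         +--------------------deactivate------------------------+
+ */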
+
+mali_bool mali_group_set_active(struct mali_group *group)
+{
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+    MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+
+    MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+                mali_group_core_description(group)));
+
+    if (mali_group_is_virtual(group)) {
+        struct mali_group *child;
+        struct mali_group *temp;
+
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+                struct mali_group, group_list) {
+            if (MALI_TRUE != child->power_is_on) {
+                return MALI_FALSE;
+            }
+
+            child->state = MALI_GROUP_STATE_ACTIVE;
+        }
+
+        mali_group_reset(group);
+    }
+
+    /* Go to ACTIVE state */
+    group->state = MALI_GROUP_STATE_ACTIVE;
+
+    return MALI_TRUE;
+}
+
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+    struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+    u32 num_domains = 0;
+    mali_bool power_down = MALI_FALSE;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+    MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+                mali_group_core_description(group)));
+
+    group->state = MALI_GROUP_STATE_INACTIVE;
+
+    MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+    domains[num_domains] = group->pm_domain;
+    num_domains++;
+
+    if (mali_group_is_virtual(group)) {
+        /* Release refs for all child groups */
+        struct mali_group *child;
+        struct mali_group *temp;
+
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                &group->group_list,
+                struct mali_group, group_list) {
+            child->state = MALI_GROUP_STATE_INACTIVE;
+
+            MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+            domains[num_domains] = child->pm_domain;
+            num_domains++;
+
+            /* Release L2 cache domain for child groups */
+            MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                    num_domains);
+            domains[num_domains] = mali_l2_cache_get_pm_domain(
+                    child->l2_cache_core[0]);
+            MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+            num_domains++;
+        }
+
+        /*
+         * We must do the mali_group_power_down() steps right here for
+         * a virtual group, because the virtual group itself is likely
+         * to stay powered on while its child groups are now very
+         * likely to be powered off (and thus lose their state).
+         */
+
+        mali_group_clear_session(group);
+        /*
+         * Disable the broadcast unit (clear its mask).
+         * This is needed in case the GPU isn't actually
+         * powered down at this point and groups are
+         * removed from an inactive virtual group.
+         * If not, then the broadcast unit will intercept
+         * their interrupts!
+         */
+        mali_bcast_disable(group->bcast_core);
+    } else {
+        /* Release L2 cache domain for physical groups */
+        MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                num_domains);
+        domains[num_domains] = mali_l2_cache_get_pm_domain(
+                group->l2_cache_core[0]);
+        MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+        num_domains++;
+    }
+
+    power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+    return power_down;
+}
+
+void mali_group_power_up(struct mali_group *group)
+{
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+                mali_group_core_description(group)));
+
+    group->power_is_on = MALI_TRUE;
+
+    if (MALI_FALSE == mali_group_is_virtual(group)
+            && MALI_FALSE == mali_group_is_in_virtual(group)) {
+        mali_group_reset(group);
+    }
+
+    /*
+     * When we acquire just one physical group from a virtual group,
+     * we must remove the bcast & dlbu mask from the virtual group and
+     * reset the bcast and dlbu cores, even though some of the PP cores
+     * in the virtual group may not be powered on.
+     */
+    if (MALI_TRUE == mali_group_is_virtual(group)) {
+        mali_bcast_reset(group->bcast_core);
+        mali_dlbu_update_mask(group->dlbu_core);
+    }
+}
+
+void mali_group_power_down(struct mali_group *group)
+{
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+                mali_group_core_description(group)));
+
+    group->power_is_on = MALI_FALSE;
+
+    if (mali_group_is_virtual(group)) {
+        /*
+         * What we do for physical jobs in this function should
+         * already have been done in mali_group_deactivate()
+         * for virtual group.
+         */
+        MALI_DEBUG_ASSERT(NULL == group->session);
+    } else {
+        mali_group_clear_session(group);
+    }
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+    u32 i;
+    struct mali_group *group;
+    struct mali_group *temp;
+
+    MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+                mali_group_core_description(vgroup),
+                vgroup));
+    MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+    MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+    i = 0;
+    _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+        MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+                i, mali_group_core_description(group),
+                group, group->l2_cache_core[0]));
+        i++;
+    }
+})
+
+static void mali_group_dump_core_status(struct mali_group *group)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
+
+       if (NULL != group->gp_core) {
+               MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
+
+               for (i = 0; i < 0xA8; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
+               }
+
+
+       } else {
+               MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
+
+               for (i = 0; i < 0x5c; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+               }
+
+               /* Ignore some minor registers */
+               for (i = 0x1000; i < 0x1068; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+               }
+       }
+
+       MALI_PRINT(("Dump Group MMU\n"));
+       for (i = 0; i < 0x24; i += 0x10) {
+               MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
+       }
+}
+
+
+/**
+ * @brief Dump group status
+ */
+void mali_group_dump_status(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *group_c;
+               struct mali_group *temp;
+               _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
+                       mali_group_dump_core_status(group_c);
+               }
+       } else {
+               mali_group_dump_core_status(group);
+       }
+}
+
+/**
+ * @brief Add child group to virtual group parent
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+    mali_bool found;
+    u32 i;
+
+    MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+                mali_group_core_description(child),
+                mali_group_core_description(parent)));
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+    MALI_DEBUG_ASSERT(NULL == child->parent_group);
+
+    _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+    child->parent_group = parent;
+
+    MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+    MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+    MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+    /* Keep track of the L2 cache cores of child groups */
+    found = MALI_FALSE;
+    for (i = 0; i < 2; i++) {
+        if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+            MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+            parent->l2_cache_core_ref_count[i]++;
+            found = MALI_TRUE;
+        }
+    }
+
+    if (!found) {
+        /* First time we see this L2 cache, add it to our list */
+        i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+        MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+        MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+        parent->l2_cache_core[i] = child->l2_cache_core[0];
+        parent->l2_cache_core_ref_count[i]++;
+    }
+
+    /* Update Broadcast Unit and DLBU */
+    mali_bcast_add_group(parent->bcast_core, child);
+    mali_dlbu_add_group(parent->dlbu_core, child);
+
+    if (MALI_TRUE == parent->power_is_on) {
+        mali_bcast_reset(parent->bcast_core);
+        mali_dlbu_update_mask(parent->dlbu_core);
+    }
+
+    if (MALI_TRUE == child->power_is_on) {
+        if (NULL == parent->session) {
+            if (NULL != child->session) {
+                /*
+                 * Parent has no session, so clear
+                 * child session as well.
+                 */
+                mali_mmu_activate_empty_page_directory(child->mmu);
+            }
+        } else {
+            if (parent->session == child->session) {
+                /* Child already has the same session as the parent,
+                 * so a simple TLB zap should be enough.
+                 */
+                mali_mmu_zap_tlb(child->mmu);
+            } else {
+                /*
+                 * Parent has a different session, so we must
+                 * switch to that session's page table.
+                 */
+                mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+            }
+
+            /* It is the parent which keeps the session from now on */
+            child->session = NULL;
+        }
+    } else {
+        /* should have been cleared when child was powered down */
+        MALI_DEBUG_ASSERT(NULL == child->session);
+    }
+
+    /* Start job on child when parent is active */
+    if (NULL != parent->pp_running_job) {
+        struct mali_pp_job *job = parent->pp_running_job;
+
+        MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+                    child, mali_pp_job_get_id(job), parent));
+
+        /* Only allowed to add active child to an active parent */
+        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+        MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+        mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+        trace_gpu_sched_switch(
+                mali_pp_core_description(child->pp_core),
+                sched_clock(), mali_pp_job_get_tid(job),
+                0, mali_pp_job_get_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+        trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+    }
+
+    MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+    u32 i;
+
+    MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+                mali_group_core_description(child),
+                mali_group_core_description(parent)));
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+    MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+    MALI_DEBUG_ASSERT(parent == child->parent_group);
+
+    /* Update Broadcast Unit and DLBU */
+    mali_bcast_remove_group(parent->bcast_core, child);
+    mali_dlbu_remove_group(parent->dlbu_core, child);
+
+    if (MALI_TRUE == parent->power_is_on) {
+        mali_bcast_reset(parent->bcast_core);
+        mali_dlbu_update_mask(parent->dlbu_core);
+    }
+
+    child->session = parent->session;
+    child->parent_group = NULL;
+
+    _mali_osk_list_delinit(&child->group_list);
+    if (_mali_osk_list_empty(&parent->group_list)) {
+        parent->session = NULL;
+    }
+
+    /* Keep track of the L2 cache cores of child groups */
+    i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+    MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+    parent->l2_cache_core_ref_count[i]--;
+    if (parent->l2_cache_core_ref_count[i] == 0) {
+        parent->l2_cache_core[i] = NULL;
+    }
+
+    MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+}
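+
+/*
+ * The l2_cache_core[]/l2_cache_core_ref_count[] bookkeeping in the
+ * add/remove pair above keeps one slot per distinct child L2. As an
+ * illustrative example, on a Mali-450-style configuration where four
+ * children share two L2s pairwise, after four mali_group_add_group()
+ * calls:
+ *
+ *     parent->l2_cache_core[0..1]           == the two distinct L2s
+ *     parent->l2_cache_core_ref_count[0..1] == { 2, 2 }
+ */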
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+    struct mali_group *child = NULL;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+
+    if (!_mali_osk_list_empty(&parent->group_list)) {
+        child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+        mali_group_remove_group(parent, child);
+    }
+
+    if (NULL != child) {
+        if (MALI_GROUP_STATE_ACTIVE != parent->state
+                && MALI_TRUE == child->power_is_on) {
+            mali_group_reset(child);
+        }
+    }
+
+    return child;
+}
+
+void mali_group_reset(struct mali_group *group)
+{
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+    MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+    MALI_DEBUG_ASSERT(NULL == group->session);
+
+    MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+                mali_group_core_description(group)));
+
+    if (NULL != group->dlbu_core) {
+        mali_dlbu_reset(group->dlbu_core);
+    }
+
+    if (NULL != group->bcast_core) {
+        mali_bcast_reset(group->bcast_core);
+    }
+
+    MALI_DEBUG_ASSERT(NULL != group->mmu);
+    mali_group_reset_mmu(group);
+
+    if (NULL != group->gp_core) {
+        MALI_DEBUG_ASSERT(NULL == group->pp_core);
+        mali_gp_reset(group->gp_core);
+    } else {
+        MALI_DEBUG_ASSERT(NULL != group->pp_core);
+        mali_group_reset_pp(group);
+    }
+}
+
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled)
+{
+    struct mali_session_data *session;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+                job,
+                mali_group_core_description(group)));
+
+    session = mali_gp_job_get_session(job);
+
+    MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+    mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+
+    /* Reset GPU and disable gpu secure mode if needed. */
+    if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+        _mali_osk_gpu_reset_and_secure_mode_disable();
+        /* Need to reset the pmu to restore its interrupt mask register */
+        if (NULL != pmu) {
+            mali_pmu_reset(pmu);
+        }
+    }
+
+    /* Reload the mmu page table if needed */
+    if (MALI_TRUE == gpu_secure_mode_pre_enabled) {
+        mali_group_reset(group);
+        mali_group_activate_page_directory(group, session, MALI_TRUE);
+    } else {
+        mali_group_activate_page_directory(group, session, MALI_FALSE);
+    }
+
+    mali_gp_job_start(group->gp_core, job);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+            MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+            mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+            mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+    trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */,  0 /* core */,
+            mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+    if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+            (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+        mali_group_report_l2_cache_counters_per_core(group, 0);
+    }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+    trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+            sched_clock(), mali_gp_job_get_tid(job),
+            0, mali_gp_job_get_id(job));
+#endif
+
+    group->gp_running_job = job;
+    group->is_working = MALI_TRUE;
+
+    /* Setup SW timer and record start time */
+    group->start_time = _mali_osk_time_tickcount();
+    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+    MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+                job,
+                mali_group_core_description(group),
+                group->start_time));
+}
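+
+/*
+ * Job watchdog: starting a job arms timeout_timer for
+ * mali_max_job_runtime milliseconds; mali_group_timeout() fires if the
+ * job is still running, and mali_group_complete_gp()/_pp() delete the
+ * timer on normal completion. Illustrative elapsed-time check against
+ * the recorded start tick (a sketch, not driver code):
+ *
+ *     u32 elapsed_ms = _mali_osk_time_tickstoms(
+ *             _mali_osk_time_tickcount() - group->start_time);
+ */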
+
+/* Sets all registers except the frame renderer list address and the
+ * fragment shader stack address; the caller must program these two
+ * registers before calling this function.
+ */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled)
+{
+    struct mali_session_data *session;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+                job, sub_job + 1,
+                mali_pp_job_get_sub_job_count(job),
+                mali_group_core_description(group)));
+
+    session = mali_pp_job_get_session(job);
+
+    if (NULL != group->l2_cache_core[0]) {
+        mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+    }
+
+    if (NULL != group->l2_cache_core[1]) {
+        mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+    }
+
+    /* Reset GPU and change gpu secure mode if needed. */
+    if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
+        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+        _mali_osk_gpu_reset_and_secure_mode_enable();
+        /* Need to reset the pmu to restore its interrupt mask register */
+        if (NULL != pmu) {
+            mali_pmu_reset(pmu);
+        }
+    } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+        _mali_osk_gpu_reset_and_secure_mode_disable();
+        /* Need to reset the pmu to restore its interrupt mask register */
+        if (NULL != pmu) {
+            mali_pmu_reset(pmu);
+        }
+    }
+
+    /* Reload the mmu page table if needed */
+    if ((MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == gpu_secure_mode_pre_enabled)
+        || (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == gpu_secure_mode_pre_enabled)) {
+        mali_group_reset(group);
+        mali_group_activate_page_directory(group, session, MALI_TRUE);
+    } else {
+        mali_group_activate_page_directory(group, session, MALI_FALSE);
+    }
+
+    if (mali_group_is_virtual(group)) {
+        struct mali_group *child;
+        struct mali_group *temp;
+        u32 core_num = 0;
+
+        MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+        /* Configure DLBU for the job */
+        mali_dlbu_config_job(group->dlbu_core, job);
+
+        /* Write stack address for each child group */
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+            mali_pp_write_addr_stack(child->pp_core, job);
+            core_num++;
+        }
+
+        mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+    } else {
+        mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+    }
+
+    /* If the group is virtual, loop through the physical groups that
+     * belong to it and emit profiling events for their cores as virtual */
+    if (MALI_TRUE == mali_group_is_virtual(group)) {
+        struct mali_group *child;
+        struct mali_group *temp;
+
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                    MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                    mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                    MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                    mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+            trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                    mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+        }
+
+#if defined(CONFIG_MALI400_PROFILING)
+        if (0 != group->l2_cache_core_ref_count[0]) {
+            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+            }
+        }
+        if (0 != group->l2_cache_core_ref_count[1]) {
+            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+            }
+        }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+    } else { /* group is physical - call profiling events for physical cores */
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+        trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+            mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+        }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+    }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+    trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+            sched_clock(), mali_pp_job_get_tid(job),
+            0, mali_pp_job_get_id(job));
+#endif
+
+    group->pp_running_job = job;
+    group->pp_running_sub_job = sub_job;
+    group->is_working = MALI_TRUE;
+
+    /* Setup SW timer and record start time */
+    group->start_time = _mali_osk_time_tickcount();
+    _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+    MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+                job, sub_job + 1,
+                mali_pp_job_get_sub_job_count(job),
+                mali_group_core_description(group),
+                group->start_time));
+
+}
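+
+/*
+ * For a virtual job the start sequence above is: configure the DLBU
+ * for the job, write the per-core fragment stack address into every
+ * child PP core, then issue a single mali_pp_job_start() on the
+ * virtual group's PP core, which the broadcast unit fans out to all
+ * children.
+ */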
+
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+    mali_l2_cache_invalidate(group->l2_cache_core[0]);
+
+    mali_mmu_zap_tlb_without_stall(group->mmu);
+
+    mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+            MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+            0, 0, 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+    trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */,  0 /* core */,
+            mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+}
+
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+    struct mali_group *child;
+    struct mali_group *temp;
+    _mali_osk_errcode_t err;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    if (!mali_group_is_virtual(group)) {
+        /* This is a physical group -- simply reset its MMU and wait
+         * for the reset to complete. */
+        err = mali_mmu_reset(group->mmu);
+        MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+    } else { /* virtual group */
+        /* Loop through all members of this virtual group and wait
+         * until they are done resetting.
+         */
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+            err = mali_mmu_reset(child->mmu);
+            MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+        }
+    }
+}
+
+static void mali_group_reset_pp(struct mali_group *group)
+{
+    struct mali_group *child;
+    struct mali_group *temp;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    mali_pp_reset_async(group->pp_core);
+
+    if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+        /* This is a physical group or an idle virtual group -- simply wait for
+         * the reset to complete. */
+        mali_pp_reset_wait(group->pp_core);
+    } else {
+        /* Loop through all members of this virtual group and wait until they
+         * are done resetting.
+         */
+        _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+            mali_pp_reset_wait(child->pp_core);
+        }
+    }
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
+{
+    struct mali_pp_job *pp_job_to_return;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+    MALI_DEBUG_ASSERT_POINTER(sub_job);
+    MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+    /* Stop/clear the timeout timer. */
+    _mali_osk_timer_del_async(group->timeout_timer);
+
+    if (NULL != group->pp_running_job) {
+
+        /* Deal with HW counters and profiling */
+
+        if (MALI_TRUE == mali_group_is_virtual(group)) {
+            struct mali_group *child;
+            struct mali_group *temp;
+
+            /* update performance counters from each physical pp core within this virtual group */
+            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+            }
+
+#if defined(CONFIG_MALI400_PROFILING)
+            /* send profiling data per physical core */
+            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                        MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                        MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                        mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                        mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                        mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                        0, 0);
+
+                trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                        0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                        mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                        mali_pp_job_get_flush_id(group->pp_running_job));
+            }
+            if (0 != group->l2_cache_core_ref_count[0]) {
+                if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                        (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                    mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                }
+            }
+            if (0 != group->l2_cache_core_ref_count[1]) {
+                if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                        (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                    mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                }
+            }
+
+#endif
+        } else {
+            /* update performance counters for a physical group's pp core */
+            mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                    MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                    MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                    mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+                    mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+                    mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                    0, 0);
+
+            trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                    0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                    mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                    mali_pp_job_get_flush_id(group->pp_running_job));
+
+            if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+            }
+#endif
+        }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+        trace_gpu_sched_switch(
+                mali_pp_core_description(group->pp_core),
+                sched_clock(), 0, 0, 0);
+#endif
+
+    }
+
+    if (success) {
+        /* Only do a soft reset for successful jobs; a full recovery
+         * reset will be done for failed jobs. */
+        mali_pp_reset_async(group->pp_core);
+    }
+
+    pp_job_to_return = group->pp_running_job;
+    group->pp_running_job = NULL;
+    group->is_working = MALI_FALSE;
+    *sub_job = group->pp_running_sub_job;
+
+    if (!success) {
+        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+        mali_group_recovery_reset(group);
+    } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+        mali_group_recovery_reset(group);
+    }
+
+    return pp_job_to_return;
+}
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+    struct mali_gp_job *gp_job_to_return;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+    MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+    /* Stop/clear the timeout timer. */
+    _mali_osk_timer_del_async(group->timeout_timer);
+
+    if (NULL != group->gp_running_job) {
+        mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                0, 0);
+
+        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+            mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+        trace_gpu_sched_switch(
+                mali_gp_core_description(group->gp_core),
+                sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+        trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */,  0 /* core */,
+                mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+        mali_gp_job_set_current_heap_addr(group->gp_running_job,
+                mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+    }
+
+    if (success) {
+        /* Only do a soft reset for successful jobs; a full recovery
+         * reset will be done for failed jobs. */
+        mali_gp_reset_async(group->gp_core);
+    }
+
+    gp_job_to_return = group->gp_running_job;
+    group->gp_running_job = NULL;
+    group->is_working = MALI_FALSE;
+
+    if (!success) {
+        MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+        mali_group_recovery_reset(group);
+    } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+        MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+        mali_group_recovery_reset(group);
+    }
+
+    return gp_job_to_return;
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+    if (mali_global_num_groups > index) {
+        return mali_global_groups[index];
+    }
+
+    return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+    return mali_global_num_groups;
+}
+
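+/* Make the session's page directory the active one on this group's MMU.
+ * If the same session is already active and no reload was requested, only
+ * a TLB zap (without stall) is needed.
+ */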
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
+{
+    MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+                mali_session_get_page_directory(session), session,
+                mali_group_core_description(group)));
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    if (group->session != session || MALI_TRUE == is_reload) {
+        /* Different session than last time, so we need to do some work */
+        MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+                    session, group->session,
+                    mali_group_core_description(group)));
+        mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+        group->session = session;
+    } else {
+        /* Same session as last time, so no work required */
+        MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+                    session->page_directory,
+                    mali_group_core_description(group)));
+        mali_mmu_zap_tlb_without_stall(group->mmu);
+    }
+}
+
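+/* Full recovery reset, used when a job fails or a soft reset does not
+ * complete: stop the bus on the GP/PP core, flush the MMU and clear any
+ * pending page fault, hard-reset the cores (each child separately, with the
+ * broadcast unit disabled, for a virtual group), reset the MMU and finally
+ * drop the current session.
+ */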
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+    _mali_osk_errcode_t err;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    /* Stop cores, bus stop */
+    if (NULL != group->pp_core) {
+        mali_pp_stop_bus(group->pp_core);
+    } else {
+        mali_gp_stop_bus(group->gp_core);
+    }
+
+    /* Flush MMU and clear page fault (if any) */
+    mali_mmu_activate_fault_flush_page_directory(group->mmu);
+    mali_mmu_page_fault_done(group->mmu);
+
+    /* Wait for cores to stop bus, then do a hard reset on them */
+    if (NULL != group->pp_core) {
+        if (mali_group_is_virtual(group)) {
+            struct mali_group *child, *temp;
+
+            /* Disable the broadcast unit while we do reset directly on the member cores. */
+            mali_bcast_disable(group->bcast_core);
+
+            _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                mali_pp_stop_bus_wait(child->pp_core);
+                mali_pp_hard_reset(child->pp_core);
+            }
+
+            mali_bcast_enable(group->bcast_core);
+        } else {
+            mali_pp_stop_bus_wait(group->pp_core);
+            mali_pp_hard_reset(group->pp_core);
+        }
+    } else {
+        mali_gp_stop_bus_wait(group->gp_core);
+        mali_gp_hard_reset(group->gp_core);
+    }
+
+    /* Reset MMU */
+    err = mali_mmu_reset(group->mmu);
+    MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+    MALI_IGNORE(err);
+
+    group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+    int n = 0;
+    int i;
+    struct mali_group *child;
+    struct mali_group *temp;
+
+    if (mali_group_is_virtual(group)) {
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "Virtual PP Group: %p\n", group);
+    } else if (mali_group_is_in_virtual(group)) {
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "Child PP Group: %p\n", group);
+    } else if (NULL != group->pp_core) {
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "Physical PP Group: %p\n", group);
+    } else {
+        MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "GP Group: %p\n", group);
+    }
+
+    switch (group->state) {
+        case MALI_GROUP_STATE_INACTIVE:
+            n += _mali_osk_snprintf(buf + n, size - n,
+                    "\tstate: INACTIVE\n");
+            break;
+        case MALI_GROUP_STATE_ACTIVATION_PENDING:
+            n += _mali_osk_snprintf(buf + n, size - n,
+                    "\tstate: ACTIVATION_PENDING\n");
+            break;
+        case MALI_GROUP_STATE_ACTIVE:
+            n += _mali_osk_snprintf(buf + n, size - n,
+                    "\tstate: ACTIVE\n");
+            break;
+        default:
+            n += _mali_osk_snprintf(buf + n, size - n,
+                    "\tstate: UNKNOWN (%d)\n", group->state);
+            MALI_DEBUG_ASSERT(0);
+            break;
+    }
+
+    n += _mali_osk_snprintf(buf + n, size - n,
+            "\tSW power: %s\n",
+            group->power_is_on ? "On" : "Off");
+
+    n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
+
+    for (i = 0; i < 2; i++) {
+        if (NULL != group->l2_cache_core[i]) {
+            struct mali_pm_domain *domain;
+            domain = mali_l2_cache_get_pm_domain(
+                    group->l2_cache_core[i]);
+            n += mali_pm_dump_state_domain(domain,
+                    buf + n, size - n);
+        }
+    }
+
+    if (group->gp_core) {
+        n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "\tGP running job: %p\n", group->gp_running_job);
+    }
+
+    if (group->pp_core) {
+        n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+        n += _mali_osk_snprintf(buf + n, size - n,
+                "\tPP running job: %p, subjob %d \n",
+                group->pp_running_job,
+                group->pp_running_sub_job);
+    }
+
+    _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+            struct mali_group, group_list) {
+        n += mali_group_dump_state(child, buf + n, size - n);
+    }
+
+    return n;
+}
+#endif
+
+/* Kasin added. */
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+#include <platform/meson_m400/mali_fix.h>
+#define INT_MALI_PP2_MMU ( 6+32)
+struct _mali_osk_irq_t_struct;
+u32 get_irqnum(struct _mali_osk_irq_t_struct* irq);
+#endif
+
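+/* Upper-half (interrupt context) handler for MMU interrupts. On Meson6,
+ * interrupts arriving on the PP2 MMU line are first filtered through the
+ * malifix per-core state machine declared above (MMU_INT_HIT -> MMU_INT_TOP)
+ * before the interrupt is forwarded to the executor.
+ */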
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    struct mali_mmu_core *mmu = group->mmu;
+#endif
+    _mali_osk_errcode_t ret;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    if (MALI_FALSE == group->power_is_on)
+        MALI_SUCCESS;
+    if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU) {
+        if (group == NULL || group->pp_core == NULL)
+            MALI_SUCCESS;
+        if (group->pp_core->core_id == 0) {
+            if (malifix_get_mmu_int_process_state(0) == MMU_INT_HIT)
+                malifix_set_mmu_int_process_state(0, MMU_INT_TOP);
+            else
+                MALI_SUCCESS;
+        } else if (group->pp_core->core_id == 1) {
+            if (malifix_get_mmu_int_process_state(1) == MMU_INT_HIT)
+                malifix_set_mmu_int_process_state(1, MMU_INT_TOP);
+            else
+                MALI_SUCCESS;
+        } else {
+            MALI_SUCCESS;
+        }
+    }
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group)) {
+        /* Not working, so nothing to do */
+        mali_executor_unlock();
+        return _MALI_OSK_ERR_FAULT;
+    }
+#endif
+    if (NULL != group->gp_core) {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    } else {
+        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                    mali_pp_core_get_id(group->pp_core)),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+
+    ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+        /* The group has completed and no job is scheduled on it; it has
+         * already been powered off. */
+        if (NULL != group->gp_core) {
+            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                    MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                    MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                    0, 0, /* No pid and tid for interrupt handler */
+                    MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                    0xFFFFFFFF, 0);
+        } else {
+            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                    MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                    MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                    0, 0, /* No pid and tid for interrupt handler */
+                    MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                        mali_pp_core_get_id(group->pp_core)),
+                    0xFFFFFFFF, 0);
+        }
+
+        mali_executor_unlock();
+        return ret;
+    }
+#endif
+
+    if (NULL != group->gp_core) {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    } else {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                    mali_pp_core_get_id(group->pp_core)),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+
+    return ret;
+}
+
+static void mali_group_bottom_half_mmu(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    struct mali_mmu_core *mmu = group->mmu;
+#endif
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+    if (NULL != group->gp_core) {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                0, _mali_osk_get_tid(), /* pid and tid */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    } else {
+        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                0, _mali_osk_get_tid(), /* pid and tid */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                    mali_pp_core_get_id(group->pp_core)),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    }
+
+    mali_executor_interrupt_mmu(group, MALI_FALSE);
+
+    if (NULL != group->gp_core) {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                0, _mali_osk_get_tid(), /* pid and tid */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    } else {
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                0, _mali_osk_get_tid(), /* pid and tid */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                    mali_pp_core_get_id(group->pp_core)),
+                mali_mmu_get_rawstat(group->mmu), 0);
+    }
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU) {
+        if (group->pp_core->core_id == 0) {
+            if (malifix_get_mmu_int_process_state(0) == MMU_INT_TOP)
+                malifix_set_mmu_int_process_state(0, MMU_INT_NONE);
+        } else if (group->pp_core->core_id == 1) {
+            if (malifix_get_mmu_int_process_state(1) == MMU_INT_TOP)
+                malifix_set_mmu_int_process_state(1, MMU_INT_NONE);
+        }
+    }
+#endif
+}
+
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+    _mali_osk_errcode_t ret;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group)) {
+        /* Not working, so nothing to do */
+        mali_executor_unlock();
+        return _MALI_OSK_ERR_FAULT;
+    }
+#endif
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+            0, 0, /* No pid and tid for interrupt handler */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+            mali_gp_get_rawstat(group->gp_core), 0);
+
+    MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                mali_gp_get_rawstat(group->gp_core),
+                mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+    ret = mali_executor_interrupt_gp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+        /* The group has completed and no job is scheduled on it; it has
+         * already been powered off. */
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                0xFFFFFFFF, 0);
+        mali_executor_unlock();
+        return ret;
+    }
+#endif
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+            0, 0, /* No pid and tid for interrupt handler */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+            mali_gp_get_rawstat(group->gp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+    return ret;
+}
+
+static void mali_group_bottom_half_gp(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+            0, _mali_osk_get_tid(), /* pid and tid */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+            mali_gp_get_rawstat(group->gp_core), 0);
+
+    mali_executor_interrupt_gp(group, MALI_FALSE);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+            0, _mali_osk_get_tid(), /* pid and tid */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+            mali_gp_get_rawstat(group->gp_core), 0);
+}
+
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+int PP0_int_cnt = 0;
+int mali_PP0_int_cnt(void)
+{
+    return PP0_int_cnt;
+}
+EXPORT_SYMBOL(mali_PP0_int_cnt);
+
+int PP1_int_cnt = 0;
+int mali_PP1_int_cnt(void)
+{
+    return PP1_int_cnt;
+}
+EXPORT_SYMBOL(mali_PP1_int_cnt);
+#endif
+
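+/* Upper-half (interrupt context) handler for PP interrupts. On Meson6 it
+ * also increments the exported per-core interrupt counters defined above.
+ */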
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    struct mali_pp_core *core = group->pp_core;
+#endif
+    _mali_osk_errcode_t ret;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group)) {
+        /* Not working, so nothing to do */
+        mali_executor_unlock();
+        return _MALI_OSK_ERR_FAULT;
+    }
+#endif
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+            0, 0, /* No pid and tid for interrupt handler */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                mali_pp_core_get_id(group->pp_core)),
+            mali_pp_get_rawstat(group->pp_core), 0);
+
+    MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                mali_pp_get_rawstat(group->pp_core),
+                mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+
+    ret = mali_executor_interrupt_pp(group, MALI_TRUE);
+
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+    if (core->core_id == 0)
+        PP0_int_cnt++;
+    else if (core->core_id == 1)
+        PP1_int_cnt++;
+#endif
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_lock();
+    if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+        /* The group has completed and no job is scheduled on it; it has
+         * already been powered off. */
+        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                0, 0, /* No pid and tid for interrupt handler */
+                MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                    mali_pp_core_get_id(group->pp_core)),
+                0xFFFFFFFF, 0);
+        mali_executor_unlock();
+        return ret;
+    }
+#endif
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+            0, 0, /* No pid and tid for interrupt handler */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                mali_pp_core_get_id(group->pp_core)),
+            mali_pp_get_rawstat(group->pp_core), 0);
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+    mali_executor_unlock();
+#endif
+#endif
+    return ret;
+}
+
+static void mali_group_bottom_half_pp(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+    MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+            0, _mali_osk_get_tid(), /* pid and tid */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                mali_pp_core_get_id(group->pp_core)),
+            mali_pp_get_rawstat(group->pp_core), 0);
+
+    mali_executor_interrupt_pp(group, MALI_FALSE);
+
+    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+            MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+            MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+            0, _mali_osk_get_tid(), /* pid and tid */
+            MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                mali_pp_core_get_id(group->pp_core)),
+            mali_pp_get_rawstat(group->pp_core), 0);
+}
+
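+/* Timeout timer callback, armed while a job is running on the group. It only
+ * schedules the GP or PP bottom half; whether this is a genuine timeout or a
+ * stale timer that lost the race against the job-complete interrupt is
+ * decided later (see mali_group_has_timed_out() in mali_group.h).
+ */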
+static void mali_group_timeout(void *data)
+{
+    struct mali_group *group = (struct mali_group *)data;
+    MALI_DEBUG_ASSERT_POINTER(group);
+
+    MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+                mali_group_core_description(group),
+                _mali_osk_time_tickcount()));
+
+    if (mali_core_timeout < 65533)
+        mali_core_timeout++;
+    if (NULL != group->gp_core) {
+        mali_group_schedule_bottom_half_gp(group);
+    } else {
+        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+        mali_group_schedule_bottom_half_pp(group);
+    }
+}
+
+mali_bool mali_group_zap_session(struct mali_group *group,
+        struct mali_session_data *session)
+{
+    MALI_DEBUG_ASSERT_POINTER(group);
+    MALI_DEBUG_ASSERT_POINTER(session);
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    if (group->session != session) {
+        /* not running from this session */
+        return MALI_TRUE; /* success */
+    }
+
+    if (group->is_working) {
+        /* The Zap also does the stall and disable_stall */
+        mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+        return zap_success;
+    } else {
+        /* Just remove the session instead of zapping */
+        mali_group_clear_session(group);
+        return MALI_TRUE; /* success */
+    }
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
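+/* Map core_num (0..2) to the corresponding L2 profiling channel, read the
+ * counter sources and values from whichever attached L2 cache core has a
+ * matching id, and report them as a single profiling event with both
+ * sources packed into one word.
+ */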
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+    u32 source0 = 0;
+    u32 value0 = 0;
+    u32 source1 = 0;
+    u32 value1 = 0;
+    u32 profiling_channel = 0;
+
+    MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+    switch (core_num) {
+        case 0:
+            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+            break;
+        case 1:
+            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+            break;
+        case 2:
+            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+            break;
+        default:
+            profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+                MALI_PROFILING_EVENT_CHANNEL_GPU |
+                MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+            break;
+    }
+
+    if (0 == core_num) {
+        mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+    }
+    if (1 == core_num) {
+        if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+            mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+        } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+            mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+        }
+    }
+    if (2 == core_num) {
+        if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+            mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+        } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+            mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+        }
+    }
+
+    _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
diff --git a/utgard/r8p0/common/mali_group.h b/utgard/r8p0/common/mali_group.h
new file mode 100755 (executable)
index 0000000..afe966f
--- /dev/null
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+#include "mali_osk_profiling.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
+
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
+
+enum mali_group_state {
+       MALI_GROUP_STATE_INACTIVE,
+       MALI_GROUP_STATE_ACTIVATION_PENDING,
+       MALI_GROUP_STATE_ACTIVE,
+};
+
+/**
+ * This structure represents a render group.
+ * A render group is defined as the set of cores that share the same Mali MMU.
+ */
+
+struct mali_group {
+       struct mali_mmu_core        *mmu;
+       struct mali_session_data    *session;
+
+       enum mali_group_state        state;
+       mali_bool                    power_is_on;
+
+       mali_bool                    is_working;
+       unsigned long                start_time; /* in ticks */
+
+       struct mali_gp_core         *gp_core;
+       struct mali_gp_job          *gp_running_job;
+
+       struct mali_pp_core         *pp_core;
+       struct mali_pp_job          *pp_running_job;
+       u32                         pp_running_sub_job;
+
+       struct mali_pm_domain       *pm_domain;
+
+       struct mali_l2_cache_core   *l2_cache_core[2];
+       u32                         l2_cache_core_ref_count[2];
+
+       /* Parent virtual group (if any) */
+       struct mali_group           *parent_group;
+
+       struct mali_dlbu_core       *dlbu_core;
+       struct mali_bcast_unit      *bcast_core;
+
+       /* Used for working groups which need to be disabled */
+       mali_bool                    disable_requested;
+
+       /* Used by group to link child groups (for virtual group) */
+       _mali_osk_list_t            group_list;
+
+       /* Used by executor module in order to link groups of same state */
+       _mali_osk_list_t            executor_list;
+
+       /* Used by PM domains to link groups of same domain */
+       _mali_osk_list_t             pm_domain_list;
+
+       _mali_osk_wq_work_t         *bottom_half_work_mmu;
+       _mali_osk_wq_work_t         *bottom_half_work_gp;
+       _mali_osk_wq_work_t         *bottom_half_work_pp;
+
+       _mali_osk_timer_t           *timeout_timer;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+                                    struct mali_dlbu_core *dlbu,
+                                    struct mali_bcast_unit *bcast,
+                                    u32 domain_index);
+
+void mali_group_dump_status(struct mali_group *group);
+
+void mali_group_delete(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+               struct mali_mmu_core *mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+               struct mali_gp_core *gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+               struct mali_pp_core *pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+MALI_STATIC_INLINE const char *mali_group_core_description(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       if (NULL != group->pp_core) {
+               return mali_pp_core_description(group->pp_core);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+               return mali_gp_core_description(group->gp_core);
+       }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       return (NULL != group->dlbu_core);
+#else
+       return MALI_FALSE;
+#endif
+}
+
+/** @brief Check if a group is a part of a virtual group or not
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group,
+ * including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (NULL != group->session) {
+               mali_mmu_activate_empty_page_directory(group->mmu);
+               group->session = NULL;
+       }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children must be ACTIVE first
+ */
+mali_bool mali_group_set_active(struct mali_group *group);
+
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
+ */
+mali_bool mali_group_deactivate(struct mali_group *group);
+
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+       struct mali_group *group, mali_bool disable)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       group->disable_requested = disable;
+
+       /**
+        * When the disable_request of any child group is set to TRUE, the
+        * disable_request of its parent group must also be set to TRUE.
+        * Conversely, the disable_request of the parent group may only be set
+        * to FALSE once the disable_request of all its child groups is FALSE.
+        */
+       if (NULL != group->parent_group && MALI_TRUE == disable) {
+               group->parent_group->disable_requested = disable;
+       }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
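+
+/* A virtual group (Mali-450/470 only) owns a DLBU and a broadcast unit and
+ * links its physical PP child groups through group_list; each child points
+ * back through parent_group. See mali_group_is_virtual() and
+ * mali_group_is_in_virtual() above.
+ */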
+
+/** @brief Checks if the group is working.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       if (mali_group_is_in_virtual(group)) {
+               struct mali_group *tmp_group = mali_executor_get_virtual_group();
+               return tmp_group->is_working;
+       }
+       return group->is_working;
+}
+
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->gp_running_job;
+}
+
+/** @brief Zap the group's MMU TLB
+ *
+ * Zap TLB on group if \a session is active.
+ */
+mali_bool mali_group_zap_session(struct mali_group *group,
+                                struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->pp_core;
+}
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled);
+
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled);
+
+/** @brief Start a PP job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a particular subjob of a PP job on a specific PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief Remove all the unused groups from the tmp_unused group list, so that the group is left in a consistent state.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_gp_get_interrupt_result(group->gp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_pp_get_interrupt_result(group->pp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_mmu_get_interrupt_result(group->mmu);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_gp_is_active(group->gp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_pp_is_active(group->pp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+       unsigned long time_cost;
+       struct mali_group *tmp_group = group;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* If the group is part of a virtual group, use the virtual group's start time */
+       if (mali_group_is_in_virtual(group)) {
+               tmp_group = mali_executor_get_virtual_group();
+       }
+
+       time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+       if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+               /*
+                * current tick is at or after timeout end time,
+                * so this is a valid timeout
+                */
+               return MALI_TRUE;
+       } else {
+               /*
+                * Not a valid timeout. A HW interrupt probably beat
+                * us to it, and the timer wasn't properly deleted
+                * (async deletion used due to atomic context).
+                */
+               return MALI_FALSE;
+       }
+}
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       mali_gp_mask_all_interrupts(group->gp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       mali_pp_mask_all_interrupts(group->pp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+       struct mali_group *group,
+       enum mali_interrupt_result exceptions)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
+
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
+
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     0, 0, 0, 0, 0);
+}
+#endif
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return _mali_osk_list_empty(&group->group_list);
+}
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/utgard/r8p0/common/mali_hw_core.c b/utgard/r8p0/common/mali_hw_core.c
new file mode 100755 (executable)
index 0000000..dd10cfe
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
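+/* Request the register region described by 'resource' and map it into kernel
+ * address space. On failure the region is released again and
+ * _MALI_OSK_ERR_FAULT is returned.
+ */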
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+       core->phys_addr = resource->base;
+       core->phys_offset = resource->base - _mali_osk_resource_base_address();
+       core->description = resource->description;
+       core->size = reg_size;
+
+       MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+               core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+               if (NULL != core->mapped_registers) {
+                       return _MALI_OSK_ERR_OK;
+               } else {
+                       MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+               }
+               _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+       } else {
+               MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+       if (NULL != core->mapped_registers) {
+               _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+               core->mapped_registers = NULL;
+       }
+       _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/utgard/r8p0/common/mali_hw_core.h b/utgard/r8p0/common/mali_hw_core.h
new file mode 100755 (executable)
index 0000000..e435725
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+       uintptr_t phys_addr;              /**< Physical address of the registers */
+       u32 phys_offset;                  /**< Offset from start of Mali to registers */
+       u32 size;                         /**< Size of registers */
+       mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+       const char *description;          /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+/*
+ * GP and PP core translate their int_stat/rawstat into one of these
+ */
+enum mali_interrupt_result {
+       MALI_INTERRUPT_RESULT_NONE,
+       MALI_INTERRUPT_RESULT_SUCCESS,
+       MALI_INTERRUPT_RESULT_SUCCESS_VS,
+       MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+       MALI_INTERRUPT_RESULT_OOM,
+       MALI_INTERRUPT_RESULT_ERROR
+};
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+       u32 read_val;
+       read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+       MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+                            core->description, relative_address, read_val));
+       return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+                            core->description, relative_address, new_val));
+       _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register is only written if the new value differs from old_val;
+ * the caller is responsible for keeping its copy of the old value updated. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+                            core->description, relative_address, new_val));
+       if (old_val != new_val) {
+               _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+       }
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+       MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+                            core->description, relative_address, new_val));
+       _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+       u32 i;
+       MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+                            core->description, relative_address, nr_of_regs));
+
+       /* Do not use burst writes against the registers */
+       for (i = 0; i < nr_of_regs; i++) {
+               mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+       }
+}
+
+/* Conditionally write a set of registers.
+ * Each register is only written if the new value differs from the
+ * corresponding entry in old_array; the caller keeps old_array updated. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+       u32 i;
+       MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+                            core->description, relative_address, nr_of_regs));
+
+       /* Do not use burst writes against the registers */
+       for (i = 0; i < nr_of_regs; i++) {
+               if (old_array[i] != write_array[i]) {
+                       mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+               }
+       }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/utgard/r8p0/common/mali_kernel_common.h b/utgard/r8p0/common/mali_kernel_common.h
new file mode 100755 (executable)
index 0000000..9bae265
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)           Do not use this function: it will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr <= the current debug level.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp)     If the asserted expression is false, a breakpoint is triggered.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer)  Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE( X )       The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * The suggested range for the debug level is [1:6], where
+ * [1:2] are messages with the highest priority; they indicate possible errors.
+ * [3:4] are messages with medium priority; they output important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
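+
+/* Illustrative example (not part of the API): with mali_debug_level set to 3,
+ *
+ *   MALI_DEBUG_PRINT(2, ("core %s stopped\n", core->description));
+ *
+ * is printed, while the same message passed with level 5 is compiled in but
+ * skipped at run time. In non-DEBUG builds all of these compile to empty
+ * statements.
+ */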
+
+/**
+*  Fundamental error macro. Reports an error code. This is abstracted to allow us to
+*  easily switch to a different error reporting method if we want, and also to allow
+*  us to search for error returns easily.
+*
+*  Note no closing semicolon - this is supplied in typical usage:
+*
+*  MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ *  Basic error macro, to indicate success.
+ *  Note no closing semicolon - this is supplied in typical usage:
+ *
+ *  MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ *  Basic error macro. This checks whether the given condition is true, and if not returns
+ *  from this function with the supplied error code. This is a macro so that we can override it
+ *  for stress testing.
+ *
+ *  Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ *  else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ *  MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ *  Error propagation macro. If the expression given is anything other than
+ *  _MALI_OSK_NO_ERROR, then the value is returned from the enclosing function
+ *  as an error code. This effectively acts as a guard clause, and propagates
+ *  error values up the call stack. This uses a temporary value to ensure that
+ *  the error expression is not evaluated twice.
+ *  If the counter for forcing a failure has been set using _mali_force_error,
+ *  this error will be returned without evaluating the expression in
+ *  MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+       do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+               if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+                       MALI_ERROR(_check_no_error_result); \
+       } while(0)
+
+/**
+ *  Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
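+
+/* Illustrative usage only (example_init and its parameters are hypothetical):
+ * the macros above compose into early-return guard clauses.
+ *
+ *   _mali_osk_errcode_t example_init(struct mali_hw_core *core,
+ *                                    const _mali_osk_resource_t *resource,
+ *                                    u32 reg_size)
+ *   {
+ *           MALI_CHECK_NON_NULL(core, _MALI_OSK_ERR_FAULT);
+ *           MALI_CHECK_NO_ERROR(mali_hw_core_create(core, resource, reg_size));
+ *           MALI_SUCCESS;
+ *   }
+ */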
+
+/**
+ *  Error macro with goto. This checks whether the given condition is true, and if not jumps
+ *  to the specified label using a goto. The label must therefore be local to the function in
+ *  which this macro appears. This is most usually used to execute some clean-up code before
+ *  exiting with a call to ERROR.
+ *
+ *  Like the other macros, this is a macro to allow us to override the condition if we wish,
+ *  e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ *  Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ *  Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
+
+#define MALI_PRINT_ERROR(args) do { \
+               MALI_PRINTF(("Mali: ERR: %s\n", __FILE__)); \
+               MALI_PRINTF(("           %s()%4d\n           ", __FUNCTION__, __LINE__)); \
+               MALI_PRINTF(args); \
+               MALI_PRINTF(("\n")); \
+       } while (0)
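+
+/*
+ * Illustrative output only: MALI_PRINT_ERROR(("core %d stuck", 2)) prints
+ * something like
+ *
+ *     Mali: ERR: mali_kernel_core.c
+ *                mali_parse_product_info() 123
+ *                core 2 stuck
+ *
+ * with the file, function and line number taken from the call site.
+ */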
+
+#define MALI_PRINT(args) do { \
+               MALI_PRINTF(("Mali: ")); \
+               MALI_PRINTF(args); \
+       } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+               if ((level) <= mali_debug_level) { \
+                       MALI_PRINTF(("Mali<" #level ">: ")); \
+                       MALI_PRINTF(args); \
+               } \
+       } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+/* Deliberately no do-while wrapper on MALI_DEBUG_PRINT_IF: it must expand to
+ * a bare if statement so that MALI_DEBUG_PRINT_ELSE can attach an else to it. */
+#define MALI_DEBUG_PRINT_IF(level, condition, args) \
+       if ((condition) && ((level) <= mali_debug_level)) { \
+               MALI_PRINTF(("Mali<" #level ">: ")); \
+               MALI_PRINTF(args); \
+       }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args) \
+       else if ((level) <= mali_debug_level) { \
+               MALI_PRINTF(("Mali<" #level ">: ")); \
+               MALI_PRINTF(args); \
+       }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do { if (!(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while (0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do { if (NULL == (pointer)) { MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break(); } } while (0)
+#define MALI_DEBUG_ASSERT(condition) do { if (!(condition)) { MALI_PRINT_ERROR(("ASSERT failed: " #condition)); _mali_osk_break(); } } while (0)
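+
+/*
+ * Illustrative only (hypothetical values): with mali_debug_level == 3,
+ *
+ *     MALI_DEBUG_PRINT(2, ("core %u ready\n", core_id));    is printed
+ *     MALI_DEBUG_PRINT(5, ("raw reg 0x%08x\n", reg_val));   is suppressed
+ *     MALI_DEBUG_ASSERT_POINTER(session);                   breaks into the
+ *                                                           debugger if NULL
+ */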
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level, args) do {} while (0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while (0)
+#define MALI_DEBUG_PRINT_IF(level, condition, args) do {} while (0)
+#define MALI_DEBUG_PRINT_ELSE(level, args) do {} while (0)
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {} while (0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while (0)
+#define MALI_DEBUG_ASSERT(condition) do {} while (0)
+
+#endif /* DEBUG */
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows static checkers (such as sparse) to generate a warning. Other
+ * toolchains may not support this, so we define it here as an empty macro if the
+ * compiler doesn't define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/utgard/r8p0/common/mali_kernel_core.c b/utgard/r8p0/common/mali_kernel_core.c
new file mode 100755 (executable)
index 0000000..bee4033
--- /dev/null
@@ -0,0 +1,1349 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
+#include <linux/fence.h>
+#endif
+#endif
+
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+
+/* Default shared memory size is set to 4 GB (0xFFFFFFFF, the u32 maximum). */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static uintptr_t global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+mali_bool mali_gpu_class_is_mali470 = MALI_FALSE;
+
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       global_gpu_base_address = _mali_osk_resource_base_address();
+       if (0 == global_gpu_base_address) {
+               err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return err;
+}
+
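+/* Map a physical PP core's register offset (relative to the GPU base address)
+ * to its one-hot bit in the broadcast unit mask. Offsets 0x20000-0x26000 are
+ * aliased mappings of PP0-PP3; an unrecognised offset yields 0 (no bcast ID).
+ */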
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+       switch (resource_pp->base - global_gpu_base_address) {
+       case 0x08000:
+       case 0x20000: /* fall-through for aliased mapping */
+               return 0x01;
+       case 0x0A000:
+       case 0x22000: /* fall-through for aliased mapping */
+               return 0x02;
+       case 0x0C000:
+       case 0x24000: /* fall-through for aliased mapping */
+               return 0x04;
+       case 0x0E000:
+       case 0x26000: /* fall-through for aliased mapping */
+               return 0x08;
+       case 0x28000:
+               return 0x10;
+       case 0x2A000:
+               return 0x20;
+       case 0x2C000:
+               return 0x40;
+       case 0x2E000:
+               return 0x80;
+       default:
+               return 0;
+       }
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+       _mali_osk_resource_t first_pp_resource;
+
+       /* Find the first PP core resource (again) */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
+               /* Create a dummy PP object for this core so that we can read the version register */
+               struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
+               if (NULL != group) {
+                       struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+                       if (NULL != pp_core) {
+                               u32 pp_version;
+
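+                               /* The version register packs, as decoded below:
+                                * [31:16] product ID, [15:8] major, [7:0] minor. */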
+                               pp_version = mali_pp_core_get_version(pp_core);
+
+                               mali_group_delete(group);
+
+                               global_gpu_major_version = (pp_version >> 8) & 0xFF;
+                               global_gpu_minor_version = pp_version & 0xFF;
+
+                               switch (pp_version >> 16) {
+                               case MALI200_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI200;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+                                       _mali_osk_abort();
+                                       break;
+                               case MALI300_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI300;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               case MALI400_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI400;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               case MALI450_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI450;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               case MALI470_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI470;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
+                               default:
+                                       MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               return _MALI_OSK_ERR_OK;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_delete_groups(void)
+{
+       struct mali_group *group;
+
+       group = mali_group_get_glob_group(0);
+       while (NULL != group) {
+               mali_group_delete(group);
+               group = mali_group_get_glob_group(0);
+       }
+
+       MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+       struct mali_l2_cache_core *l2;
+
+       l2 = mali_l2_cache_core_get_glob_l2_core(0);
+       while (NULL != l2) {
+               mali_l2_cache_delete(l2);
+               l2 = mali_l2_cache_core_get_glob_l2_core(0);
+       }
+
+       MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
+{
+       struct mali_l2_cache_core *l2_cache = NULL;
+
+       if (NULL != resource) {
+
+               MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+               l2_cache = mali_l2_cache_create(resource, domain_index);
+               if (NULL == l2_cache) {
+                       MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+                       return NULL;
+               }
+       }
+       MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+
+       return l2_cache;
+}
+
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+       struct mali_l2_cache_core *l2_cache = NULL;
+
+       if (mali_is_mali400()) {
+               _mali_osk_resource_t l2_resource;
+               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
+               if (NULL == l2_cache) {
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       } else if (mali_is_mali450()) {
+               /*
+                * L2 for GP    at 0x10000
+                * L2 for PP0-3 at 0x01000
+                * L2 for PP4-7 at 0x11000 (optional)
+                */
+
+               _mali_osk_resource_t l2_gp_resource;
+               _mali_osk_resource_t l2_pp_grp0_resource;
+               _mali_osk_resource_t l2_pp_grp1_resource;
+
+               /* Make cluster for GP's L2 */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Find corresponding l2 domain */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               /* Second PP core group is optional, don't fail if we don't find it */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               }
+       } else if (mali_is_mali470()) {
+               _mali_osk_resource_t l2c1_resource;
+
+               /* Make cluster for L2C1 */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+               _mali_osk_resource_t *resource_mmu,
+               _mali_osk_resource_t *resource_gp,
+               _mali_osk_resource_t *resource_pp,
+               u32 domain_index)
+{
+       struct mali_mmu_core *mmu;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+       /* Create the group object */
+       group = mali_group_create(cache, NULL, NULL, domain_index);
+       if (NULL == group) {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+               return NULL;
+       }
+
+       /* Create the MMU object inside group */
+       mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+       if (NULL == mmu) {
+               MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+               mali_group_delete(group);
+               return NULL;
+       }
+
+       if (NULL != resource_gp) {
+               /* Create the GP core object inside this group */
+               struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+               if (NULL == gp_core) {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create GP object\n"));
+                       mali_group_delete(group);
+                       return NULL;
+               }
+       }
+
+       if (NULL != resource_pp) {
+               struct mali_pp_core *pp_core;
+
+               /* Create the PP core object inside this group */
+               pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+               if (NULL == pp_core) {
+                       /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+                       MALI_PRINT_ERROR(("Failed to create PP object\n"));
+                       mali_group_delete(group);
+                       return NULL;
+               }
+       }
+
+       return group;
+}
+
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+               _mali_osk_resource_t *resource_pp_bcast,
+               _mali_osk_resource_t *resource_dlbu,
+               _mali_osk_resource_t *resource_bcast)
+{
+       struct mali_mmu_core *mmu_pp_bcast_core;
+       struct mali_pp_core *pp_bcast_core;
+       struct mali_dlbu_core *dlbu_core;
+       struct mali_bcast_unit *bcast_core;
+       struct mali_group *group;
+
+       MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+       /* Create the DLBU core object */
+       dlbu_core = mali_dlbu_create(resource_dlbu);
+       if (NULL == dlbu_core) {
+               MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the Broadcast unit core */
+       bcast_core = mali_bcast_unit_create(resource_bcast);
+       if (NULL == bcast_core) {
+               MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the group object */
+#if defined(DEBUG)
+       /* Get a physical PP group to temporarily add to broadcast unit.  IRQ
+        * verification needs a physical group in the broadcast unit to test
+        * the broadcast unit interrupt line. */
+       {
+               struct mali_group *phys_group = NULL;
+               int i;
+               for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+                       phys_group = mali_group_get_glob_group(i);
+                       if (NULL != mali_group_get_pp_core(phys_group)) break;
+               }
+               MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+               /* Add the group temporarily to the broadcast, and update the
+                * broadcast HW. Since the HW is not updated when removing the
+                * group the IRQ check will work when the virtual PP is created
+                * later.
+                *
+                * When the virtual group gets populated, the actually used
+                * groups will be added to the broadcast unit and the HW will
+                * be updated.
+                */
+               mali_bcast_add_group(bcast_core, phys_group);
+               mali_bcast_reset(bcast_core);
+               mali_bcast_remove_group(bcast_core, phys_group);
+       }
+#endif /* DEBUG */
+       group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
+       if (NULL == group) {
+               MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+               mali_bcast_unit_delete(bcast_core);
+               mali_dlbu_delete(dlbu_core);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the MMU object inside group */
+       mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+       if (NULL == mmu_pp_bcast_core) {
+               MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create the PP core object inside this group */
+       pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+       if (NULL == pp_bcast_core) {
+               /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+               MALI_PRINT_ERROR(("Failed to create PP object\n"));
+               mali_group_delete(group);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+       struct mali_group *group;
+       int cluster_id_gp = 0;
+       int cluster_id_pp_grp0 = 0;
+       int cluster_id_pp_grp1 = 0;
+       int i;
+
+       _mali_osk_resource_t resource_gp;
+       _mali_osk_resource_t resource_gp_mmu;
+       _mali_osk_resource_t resource_pp[8];
+       _mali_osk_resource_t resource_pp_mmu[8];
+       _mali_osk_resource_t resource_pp_mmu_bcast;
+       _mali_osk_resource_t resource_pp_bcast;
+       _mali_osk_resource_t resource_dlbu;
+       _mali_osk_resource_t resource_bcast;
+       _mali_osk_errcode_t resource_gp_found;
+       _mali_osk_errcode_t resource_gp_mmu_found;
+       _mali_osk_errcode_t resource_pp_found[8];
+       _mali_osk_errcode_t resource_pp_mmu_found[8];
+       _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+       _mali_osk_errcode_t resource_pp_bcast_found;
+       _mali_osk_errcode_t resource_dlbu_found;
+       _mali_osk_errcode_t resource_bcast_found;
+
+       if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) {
+               /* No known HW core */
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+               /* Group settings are not overridden by module parameters, so use device settings */
+               _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+                       /* Use device specific settings (if defined) */
+                       if (0 != data.max_job_runtime) {
+                               mali_max_job_runtime = data.max_job_runtime;
+                       }
+               }
+       }
+
+       if (mali_is_mali450()) {
+               /* Mali-450 has separate L2s for the GP and the PP core group(s) */
+               cluster_id_pp_grp0 = 1;
+               cluster_id_pp_grp1 = 2;
+       }
+
+       resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+       resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+       resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+       resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+       resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+       resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+       resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+       resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+       resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+       resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+       resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+       resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+       resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+       resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+       resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+       resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+       resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+       resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+               resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+               resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+               resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
+
+               if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+                   _MALI_OSK_ERR_OK != resource_dlbu_found ||
+                   _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+                   _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+                       /* Missing mandatory core(s) for Mali-450 or Mali-470 */
+                       MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 and Mali-470 need DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK != resource_gp_found ||
+           _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+           _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+           _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+               /* Missing mandatory core(s) */
+               MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
+       if (NULL == group) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create group for first (and mandatory) PP core */
+       MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
+       if (NULL == group) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mali_inited_pp_cores_group_1++;
+
+       /* Create groups for rest of the cores in the first PP core group */
+       for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+               if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+                       if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+                               if (NULL == group) {
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               mali_inited_pp_cores_group_1++;
+                       }
+               }
+       }
+
+       /* Create groups for cores in the second PP core group */
+       for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+               if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+                       if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+                               MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+                               if (NULL == group) {
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               mali_inited_pp_cores_group_2++;
+                       }
+               }
+       }
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return err;
+               }
+       }
+
+       mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+       mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+       MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+               MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+       /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+       return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+       _mali_osk_resource_t resource_pmu;
+
+       MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
+               struct mali_pmu_core *pmu;
+
+               pmu = mali_pmu_create(&resource_pmu);
+               if (NULL == pmu) {
+                       MALI_PRINT_ERROR(("Failed to create PMU\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       /* It's ok if the PMU doesn't exist */
+       return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+       _mali_osk_device_data data = { 0, };
+       _mali_osk_errcode_t ret;
+
+       /* The priority for setting mali_shared_mem_size,
+        * mali_dedicated_mem_start and mali_dedicated_mem_size is:
+        * 1. module parameter;
+        * 2. platform data;
+        * 3. default value.
+        */
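+       /* For example (hypothetical values), loading the module as
+        *   insmod mali.ko mali_dedicated_mem_start=0x40000000 mali_dedicated_mem_size=0x08000000
+        * overrides whatever the platform data supplies.
+        */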
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Memory settings are not overridden by module parameters, so use device settings */
+               if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
+                       /* Use device specific settings (if defined) */
+                       mali_dedicated_mem_start = data.dedicated_mem_start;
+                       mali_dedicated_mem_size = data.dedicated_mem_size;
+               }
+
+               if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+                   0 != data.shared_mem_size) {
+                       mali_shared_mem_size = data.shared_mem_size;
+               }
+       }
+
+       if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+               MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start));
+
+               /* Dedicated memory */
+               ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 < mali_shared_mem_size) {
+               MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
+               /* Shared OS memory */
+               ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       if (0 == mali_fb_start && 0 == mali_fb_size) {
+               /* Frame buffer settings are not overridden by module parameters, so use device settings */
+               _mali_osk_device_data data = { 0, };
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+                       /* Use device specific settings (if defined) */
+                       mali_fb_start = data.fb_start;
+                       mali_fb_size = data.fb_size;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       } else {
+               MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+                                    mali_fb_size, mali_fb_start));
+       }
+
+       if (0 != mali_fb_size) {
+               /* Register frame buffer */
+               ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+                       mali_memory_terminate();
+                       return ret;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static void mali_detect_gpu_class(void)
+{
+       u32 gpu_class = _mali_osk_identify_gpu_resource();
+
+       if (0x450 == gpu_class)
+               mali_gpu_class_is_mali450 = MALI_TRUE;
+       else if (0x470 == gpu_class)
+               mali_gpu_class_is_mali470 = MALI_TRUE;
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       _mali_osk_resource_t resource_bcast;
+
+       /* Ensure broadcast unit is in a good state before we start creating
+        * groups and cores.
+        */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
+               struct mali_bcast_unit *bcast_core;
+
+               bcast_core = mali_bcast_unit_create(&resource_bcast);
+               if (NULL == bcast_core) {
+                       MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+               mali_bcast_unit_delete(bcast_core);
+       }
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+       _mali_osk_errcode_t err;
+
+#ifdef CONFIG_MALI_DT
+       err = _mali_osk_resource_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+#endif
+
+       mali_pp_job_initialize();
+
+       err = mali_timeline_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       err = mali_session_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Try to init GPU secure mode */
+       _mali_osk_gpu_secure_mode_init();
+
+#if defined(CONFIG_MALI400_PROFILING)
+       err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+       if (_MALI_OSK_ERR_OK != err) {
+               /* No biggie if we weren't able to initialize the profiling */
+               MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+       }
+#endif
+
+       err = mali_memory_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       err = mali_executor_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       err = mali_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Configure memory early, needed by mali_mmu_initialize. */
+       err = mali_parse_config_memory();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       err = mali_set_global_gpu_base_address();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Detect GPU class (from the platform's GPU resource identifier) */
+       mali_detect_gpu_class();
+
+       err = mali_check_shared_interrupts();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Initialize the MALI PMU (will not touch HW!) */
+       err = mali_parse_config_pmu();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Initialize the power management module */
+       err = mali_pm_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Make sure the entire GPU stays on for the rest of this function */
+       mali_pm_init_begin();
+
+       /* Ensure HW is in a good state before starting to access cores. */
+       err = mali_init_hw_reset();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Detect which Mali GPU we are dealing with */
+       err = mali_parse_product_info();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* The global_product_id is now populated with the correct Mali GPU */
+
+       /* Start configuring the actual Mali hardware. */
+
+       err = mali_mmu_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               err = mali_dlbu_initialize();
+               if (_MALI_OSK_ERR_OK != err) {
+                       mali_pm_init_end();
+                       mali_terminate_subsystems();
+                       return err;
+               }
+       }
+
+       err = mali_parse_config_l2_cache();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       err = mali_parse_config_groups();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Move groups into executor */
+       mali_executor_populate();
+
+       /* Must be called after every group has been assigned a domain */
+       mali_pm_power_cost_setup();
+
+       /* Initialize the GPU timer */
+       err = mali_control_timer_init();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+       /* Initialize the GPU utilization tracking */
+       err = mali_utilization_init();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
+#if defined(CONFIG_MALI_DVFS)
+       err = mali_dvfs_policy_init();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+#endif
+
+       /* Allowing the system to be turned off */
+       mali_pm_init_end();
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+void mali_terminate_subsystems(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+       mali_utilization_term();
+       mali_control_timer_term();
+
+       mali_executor_depopulate();
+       mali_delete_groups(); /* Delete groups not added to executor */
+       mali_executor_terminate();
+
+       mali_scheduler_terminate();
+       mali_pp_job_terminate();
+       mali_delete_l2_cache_cores();
+       mali_mmu_terminate();
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               mali_dlbu_terminate();
+       }
+
+       mali_pm_terminate();
+
+       if (NULL != pmu) {
+               mali_pmu_delete(pmu);
+       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_term();
+#endif
+
+       _mali_osk_gpu_secure_mode_deinit();
+
+       mali_memory_terminate();
+
+       mali_session_terminate();
+
+       mali_timeline_terminate();
+
+       global_gpu_base_address = 0;
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+       return global_product_id;
+}
+
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+       return global_gpu_major_version;
+}
+
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+       return global_gpu_minor_version;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       /* check compatibility */
+       if (args->version == _MALI_UK_API_VERSION) {
+               args->compatible = 1;
+       } else {
+               args->compatible = 0;
+       }
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       /* check compatibility */
+       if (args->version == _MALI_UK_API_VERSION) {
+               args->compatible = 1;
+       } else {
+               args->compatible = 0;
+       }
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+       _mali_osk_errcode_t err;
+       _mali_osk_notification_t *notification;
+       _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue) {
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+               return _MALI_OSK_ERR_OK;
+       }
+
+       /* receive a notification, might sleep */
+       err = _mali_osk_notification_queue_receive(queue, &notification);
+       if (_MALI_OSK_ERR_OK != err) {
+               MALI_ERROR(err); /* errcode returned, pass on to caller */
+       }
+
+       /* copy the buffer to the user */
+       args->type = (_mali_uk_notification_type)notification->notification_type;
+       _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+       /* finished with the notification */
+       _mali_osk_notification_delete(notification);
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+       _mali_osk_notification_t *notification;
+       _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
+
+       /* if the queue does not exist we're currently shutting down */
+       if (NULL == queue) {
+               MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+               return _MALI_OSK_ERR_OK;
+       }
+
+       notification = _mali_osk_notification_create(args->type, 0);
+       if (NULL == notification) {
+               MALI_PRINT_ERROR(("Failed to create notification object\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       _mali_osk_notification_queue_send(queue, notification);
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args)
+{
+       wait_queue_head_t *queue;
+
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       queue = mali_session_get_wait_queue();
+
+       /* check pending big job number, might sleep if larger than MAX allowed number */
+       if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) {
+               return _MALI_OSK_ERR_RESTARTSYSCALL;
+       }
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
+{
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (!session->use_high_priority_job_queue) {
+               session->use_high_priority_job_queue = MALI_TRUE;
+               MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+       u32 i;
+       struct mali_session_data *session;
+
+       /* allocated struct to track this session */
+       session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+       MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+       /* create a response queue for this session */
+       session->ioctl_queue = _mali_osk_notification_queue_init();
+       if (NULL == session->ioctl_queue) {
+               goto err;
+       }
+
+       /* Create a wait queue for this session */
+       session->wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == session->wait_queue) {
+               goto err_wait_queue;
+       }
+
+       session->page_directory = mali_mmu_pagedir_alloc();
+       if (NULL == session->page_directory) {
+               goto err_mmu;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+               MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+               goto err_mmu;
+       }
+
+       if (0 != mali_dlbu_phys_addr) {
+               mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+                                       _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+               goto err_session;
+       }
+
+       /* Create soft system. */
+       session->soft_job_system = mali_soft_job_system_create(session);
+       if (NULL == session->soft_job_system) {
+               goto err_soft;
+       }
+
+       /* Initialize the dma fence context. */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       session->fence_context = dma_fence_context_alloc(1);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+       session->fence_context = fence_context_alloc(1);
+       _mali_osk_atomic_init(&session->fence_seqno, 0);
+#else
+       MALI_PRINT_ERROR(("The kernel version not support dma fence!\n"));
+       goto err_time_line;
+#endif
+#endif
+
+       /* Create timeline system. */
+       session->timeline_system = mali_timeline_system_create(session);
+       if (NULL == session->timeline_system) {
+               goto err_time_line;
+       }
+
+#if defined(CONFIG_MALI_DVFS)
+       _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
+#endif
+
+       _mali_osk_atomic_init(&session->number_of_pp_jobs, 0);
+
+       session->use_high_priority_job_queue = MALI_FALSE;
+
+       /* Initialize list of PP jobs on this session. */
+       _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+       /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+       for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+               _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+       }
+
+       session->pid = _mali_osk_get_pid();
+       session->comm = _mali_osk_get_comm();
+       session->max_mali_mem_allocated_size = 0;
+       for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) {
+               atomic_set(&session->mali_mem_array[i], 0);
+       }
+       atomic_set(&session->mali_mem_allocated_pages, 0);
+       *context = (void *)session;
+
+       /* Add session to the list of all sessions. */
+       mali_session_add(session);
+
+       MALI_DEBUG_PRINT(3, ("Session started\n"));
+       return _MALI_OSK_ERR_OK;
+
+err_time_line:
+       mali_soft_job_system_destroy(session->soft_job_system);
+err_soft:
+       mali_memory_session_end(session);
+err_session:
+       mali_mmu_pagedir_free(session->page_directory);
+err_mmu:
+       _mali_osk_wait_queue_term(session->wait_queue);
+err_wait_queue:
+       _mali_osk_notification_queue_term(session->ioctl_queue);
+err:
+       _mali_osk_free(session);
+       MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+}
+
+#if defined(DEBUG)
+/* parameter used for debug */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+       struct mali_session_data *session;
+       MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+       session = (struct mali_session_data *)*context;
+
+       MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+       MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+       MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+       /* Remove session from list of all sessions. */
+       mali_session_remove(session);
+
+       /* This flag is used to prevent queueing of jobs due to activation. */
+       session->is_aborting = MALI_TRUE;
+
+       /* Stop the soft job timer. */
+       mali_timeline_system_stop_timer(session->timeline_system);
+
+       /* Abort queued jobs */
+       mali_scheduler_abort_session(session);
+
+       /* Abort executing jobs */
+       mali_executor_abort_session(session);
+
+       /* Abort the soft job system. */
+       mali_soft_job_system_abort(session->soft_job_system);
+
+       /* Force execution of all pending bottom half processing for GP and PP. */
+       _mali_osk_wq_flush();
+
+       /* The session PP list should now be empty. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+       /* At this point the GP and PP schedulers no longer have any jobs queued or running from this
+        * session, and all soft jobs in the soft job system have been destroyed. */
+
+       /* Any trackers left in the timeline system are directly or indirectly waiting on external
+        * sync fences.  Cancel all sync fence waiters to trigger activation of all remaining
+        * trackers.  This call will sleep until all timelines are empty. */
+       mali_timeline_system_abort(session->timeline_system);
+
+       /* Flush pending work.
+        * Needed to make sure all bottom half processing related to this
+        * session has been completed, before we free internal data structures.
+        */
+       _mali_osk_wq_flush();
+
+       /* Destroy timeline system. */
+       mali_timeline_system_destroy(session->timeline_system);
+       session->timeline_system = NULL;
+
+       /* Destroy soft system. */
+       mali_soft_job_system_destroy(session->soft_job_system);
+       session->soft_job_system = NULL;
+
+       /* Wait for the session job lists to become empty. */
+       _mali_osk_wait_queue_wait_event(session->wait_queue, mali_session_pp_job_is_empty, (void *) session);
+
+       /* Free remaining memory allocated to this session */
+       mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI_DVFS)
+       _mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+       _mali_osk_profiling_stop_sampling(session->pid);
+#endif
+
+       /* Free session data structures */
+       mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
+       mali_mmu_pagedir_free(session->page_directory);
+       _mali_osk_wait_queue_term(session->wait_queue);
+       _mali_osk_notification_queue_term(session->ioctl_queue);
+       _mali_osk_free(session);
+
+       *context = NULL;
+
+       MALI_DEBUG_PRINT(3, ("Session has ended\n"));
+
+#if defined(DEBUG)
+       MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+       MALI_DEBUG_PRINT(3, ("       # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+       num_pm_runtime_resume = 0;
+       num_pm_updates = 0;
+       num_pm_updates_up = 0;
+       num_pm_updates_down = 0;
+#endif
+
+       return _MALI_OSK_ERR_OK;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+       int n = 0; /* Number of bytes written to buf */
+
+       n += mali_scheduler_dump_state(buf + n, size - n);
+       n += mali_executor_dump_state(buf + n, size - n);
+
+       return n;
+}
+#endif
diff --git a/utgard/r8p0/common/mali_kernel_core.h b/utgard/r8p0/common/mali_kernel_core.h
new file mode 100644 (file)
index 0000000..cf6af32
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+typedef enum {
+       _MALI_PRODUCT_ID_UNKNOWN,
+       _MALI_PRODUCT_ID_MALI200,
+       _MALI_PRODUCT_ID_MALI300,
+       _MALI_PRODUCT_ID_MALI400,
+       _MALI_PRODUCT_ID_MALI450,
+       _MALI_PRODUCT_ID_MALI470,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+extern mali_bool mali_gpu_class_is_mali470;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
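+/* Only implemented when built with MALI_STATE_TRACKING; see mali_kernel_core.c. */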
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+MALI_STATIC_INLINE mali_bool mali_is_mali470(void)
+{
+       return mali_gpu_class_is_mali470;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+       return mali_gpu_class_is_mali450;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+       if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470)
+               return MALI_FALSE;
+
+       return MALI_TRUE;
+}
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/utgard/r8p0/common/mali_kernel_utilization.c b/utgard/r8p0/common/mali_kernel_utilization.c
new file mode 100755 (executable)
index 0000000..ca7ebea
--- /dev/null
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
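+/*
+ * Utilization values are expressed in parts of 256: the GPU is considered
+ * GP bound when GP utilization rises above 240/256 (~94%) while PP
+ * utilization stays below 250/256 (~98%).
+ */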
+
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
+
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u32 last_utilization_gpu = 0;
+static u32 last_utilization_gp = 0;
+static u32 last_utilization_pp = 0;
+
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+
+/* First control timer timeout, in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
+
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer)
+{
+       u64 time_now;
+       u32 leading_zeroes;
+       u32 shift_val;
+       u32 work_normalized_gpu;
+       u32 work_normalized_gp;
+       u32 work_normalized_pp;
+       u32 period_normalized;
+       u32 utilization_gpu;
+       u32 utilization_gp;
+       u32 utilization_pp;
+
+       mali_utilization_data_lock();
+
+       time_now = _mali_osk_time_get_ns();
+
+       *time_period = time_now - *start_time;
+
+       if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+               mali_control_timer_pause();
+               /*
+                * No work done for this period
+                * - No need to reschedule timer
+                * - Report zero usage
+                */
+               last_utilization_gpu = 0;
+               last_utilization_gp = 0;
+               last_utilization_pp = 0;
+
+               mali_util_data.utilization_gpu = last_utilization_gpu;
+               mali_util_data.utilization_gp = last_utilization_gp;
+               mali_util_data.utilization_pp = last_utilization_pp;
+
+               mali_utilization_data_unlock();
+
+               *need_add_timer = MALI_FALSE;
+
+               mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+
+               MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+               MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+               MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+               return &mali_util_data;
+       }
+
+       /* If we are currently busy, update working period up to now */
+       if (work_start_time_gpu != 0) {
+               accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+               work_start_time_gpu = time_now;
+
+               /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+               if (work_start_time_gp != 0) {
+                       accumulated_work_time_gp += (time_now - work_start_time_gp);
+                       work_start_time_gp = time_now;
+               }
+
+               if (work_start_time_pp != 0) {
+                       accumulated_work_time_pp += (time_now - work_start_time_pp);
+                       work_start_time_pp = time_now;
+               }
+       }
+
+       /*
+        * We have two 64-bit values, a dividend and a divisor.
+        * To avoid depending on a 64-bit divide, we first shift both values
+        * down equally so they fit in 32 bits, then scale the dividend up
+        * (or the divisor down) so the result is expressed in parts of 256.
+        */
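+       /*
+        * Worked example: for a period of ~5 s (0x12A05F200 ns) the upper 32
+        * bits are 1, so leading_zeroes is 31 and shift_val is 1; dividend
+        * and divisor are both halved, preserving the ratio. For periods
+        * shorter than 2^32 ns (~4.3 s) the upper word is 0; the code relies
+        * on _mali_osk_clz() returning 32 for a zero argument, giving a
+        * shift_val of 0.
+        */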
+
+       /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+       leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
+       shift_val = 32 - leading_zeroes;
+       work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+       work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+       work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+       period_normalized = (u32)(*time_period >> shift_val);
+
+       /*
+        * The usage is reported in parts of 256, so we must shift the
+        * dividend up, or the divisor down, by 8 bits. (A combination of
+        * both would also work, but a single shift is simpler and the end
+        * result is good enough.)
+        */
+       if (period_normalized > 0x00FFFFFF) {
+               /* The divisor is so big that it is safe to shift it down */
+               period_normalized >>= 8;
+       } else {
+               /*
+                * The divisor is so small that we can shift up the dividend without losing any data.
+                * (dividend is always smaller than the divisor)
+                */
+               work_normalized_gpu <<= 8;
+               work_normalized_gp <<= 8;
+               work_normalized_pp <<= 8;
+       }
+
+       utilization_gpu = work_normalized_gpu / period_normalized;
+       utilization_gp = work_normalized_gp / period_normalized;
+       utilization_pp = work_normalized_pp / period_normalized;
+
+       last_utilization_gpu = utilization_gpu;
+       last_utilization_gp = utilization_gp;
+       last_utilization_pp = utilization_pp;
+
+       if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+           (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+               mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+       } else {
+               mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+       }
+
+       /* starting a new period */
+       accumulated_work_time_gpu = 0;
+       accumulated_work_time_gp = 0;
+       accumulated_work_time_pp = 0;
+
+       *start_time = time_now;
+
+       mali_util_data.utilization_gp = last_utilization_gp;
+       mali_util_data.utilization_gpu = last_utilization_gpu;
+       mali_util_data.utilization_pp = last_utilization_pp;
+
+       mali_utilization_data_unlock();
+
+       *need_add_timer = MALI_TRUE;
+
+       MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+       MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+       MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+       return &mali_util_data;
+}
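+
+/*
+ * Illustrative usage sketch (not part of the driver): a periodic timer
+ * callback would typically drive the calculation and re-arm itself based on
+ * the need_add_timer out-parameter, roughly:
+ *
+ *     static u64 period_start;
+ *     u64 period;
+ *     mali_bool rearm;
+ *     struct mali_gpu_utilization_data *d;
+ *
+ *     d = mali_utilization_calculate(&period_start, &period, &rearm);
+ *     if (NULL != mali_utilization_callback)
+ *             mali_utilization_callback(d);
+ *     if (rearm)
+ *             mali_control_timer_add(interval_ms);
+ *
+ * where interval_ms (an example name) is the control timer period. The real
+ * scheduling logic lives in the control timer (see mali_control_timer.h).
+ */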
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+       _mali_osk_device_data data;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               if (NULL != data.utilization_callback) {
+                       mali_utilization_callback = data.utilization_callback;
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
+               }
+       }
+#endif /* USING_GPU_UTILIZATION */
+
+       if (NULL == mali_utilization_callback) {
+               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
+       }
+
+       utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+       if (NULL == utilization_data_lock) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       num_running_gp_cores = 0;
+       num_running_pp_cores = 0;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_term(void)
+{
+       if (NULL != utilization_data_lock) {
+               _mali_osk_spinlock_irq_term(utilization_data_lock);
+       }
+}
+
+void mali_utilization_gp_start(void)
+{
+       mali_utilization_data_lock();
+
+       ++num_running_gp_cores;
+       if (1 == num_running_gp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* First GP core started, consider GP busy from now and onwards */
+               work_start_time_gp = time_now;
+
+               if (0 == num_running_pp_cores) {
+                       mali_bool is_resume = MALI_FALSE;
+                       /*
+                        * There are no PP cores running, so this is also the point
+                        * at which we consider the GPU to be busy as well.
+                        */
+                       work_start_time_gpu = time_now;
+
+                       is_resume = mali_control_timer_resume(time_now);
+
+                       mali_utilization_data_unlock();
+
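+                       /*
+                        * Note: the utilization data lock was dropped above;
+                        * the DVFS and control timer calls below may take
+                        * locks of their own.
+                        */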
+                       if (is_resume) {
+                               /* Do some policy in new period for performance consideration */
+#if defined(CONFIG_MALI_DVFS)
+                               /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+                               mali_session_max_window_num();
+                               if (0 == last_utilization_gpu) {
+                                       /*
+                                        * mali_dev_pause() is called when the
+                                        * clock is changed, so every change would
+                                        * otherwise jump to the highest clock
+                                        * step, even when clocking down. That is
+                                        * unnecessary, so we only start a new
+                                        * DVFS period when the last utilization
+                                        * was 0, i.e. when the timer was stopped
+                                        * and the GPU is now starting up again.
+                                        */
+                                       mali_dvfs_policy_new_period();
+                               }
+#endif
+                               /*
+                                * Use a short interval for the first timeout,
+                                * for power reasons: the new period starts at
+                                * full power, so if the job load is light and
+                                * finishes within ~10 ms, a longer interval
+                                * would keep the GPU at high frequency and
+                                * waste power.
+                                */
+                               mali_control_timer_add(mali_control_first_timeout);
+                       }
+               } else {
+                       mali_utilization_data_unlock();
+               }
+
+       } else {
+               /* Nothing to do */
+               mali_utilization_data_unlock();
+       }
+}
+
+void mali_utilization_pp_start(void)
+{
+       mali_utilization_data_lock();
+
+       ++num_running_pp_cores;
+       if (1 == num_running_pp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* First PP core started, consider PP busy from now and onwards */
+               work_start_time_pp = time_now;
+
+               if (0 == num_running_gp_cores) {
+                       mali_bool is_resume = MALI_FALSE;
+                       /*
+                        * There are no GP cores running, so this is also the point
+                        * at which we consider the GPU to be busy as well.
+                        */
+                       work_start_time_gpu = time_now;
+
+                       /* Start a new period if stopped */
+                       is_resume = mali_control_timer_resume(time_now);
+
+                       mali_utilization_data_unlock();
+
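+                       /* As in mali_utilization_gp_start(): the data lock is
+                        * dropped before the DVFS/timer calls below. */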
+                       if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+                               /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+                               mali_session_max_window_num();
+                               if (0 == last_utilization_gpu) {
+                                       /*
+                                        * mali_dev_pause() is called when the
+                                        * clock is changed, so every change would
+                                        * otherwise jump to the highest clock
+                                        * step, even when clocking down. That is
+                                        * unnecessary, so we only start a new
+                                        * DVFS period when the last utilization
+                                        * was 0, i.e. when the timer was stopped
+                                        * and the GPU is now starting up again.
+                                        */
+                                       mali_dvfs_policy_new_period();
+                               }
+#endif
+
+                               /*
+                                * Use a short interval for the first timeout,
+                                * for power reasons: the new period starts at
+                                * full power, so if the job load is light and
+                                * finishes within ~10 ms, a longer interval
+                                * would keep the GPU at high frequency and
+                                * waste power.
+                                */
+                               mali_control_timer_add(mali_control_first_timeout);
+                       }
+               } else {
+                       mali_utilization_data_unlock();
+               }
+       } else {
+               /* Nothing to do */
+               mali_utilization_data_unlock();
+       }
+}
+
+void mali_utilization_gp_end(void)
+{
+       mali_utilization_data_lock();
+
+       --num_running_gp_cores;
+       if (0 == num_running_gp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* Last GP core ended, consider GP idle from now and onwards */
+               accumulated_work_time_gp += (time_now - work_start_time_gp);
+               work_start_time_gp = 0;
+
+               if (0 == num_running_pp_cores) {
+                       /*
+                        * There are no PP cores running, so this is also the point
+                        * at which we consider the GPU to be idle as well.
+                        */
+                       accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+                       work_start_time_gpu = 0;
+               }
+       }
+
+       mali_utilization_data_unlock();
+}
+
+void mali_utilization_pp_end(void)
+{
+       mali_utilization_data_lock();
+
+       --num_running_pp_cores;
+       if (0 == num_running_pp_cores) {
+               u64 time_now = _mali_osk_time_get_ns();
+
+               /* Last PP core ended, consider PP idle from now and onwards */
+               accumulated_work_time_pp += (time_now - work_start_time_pp);
+               work_start_time_pp = 0;
+
+               if (0 == num_running_gp_cores) {
+                       /*
+                        * There are no GP cores running, so this is also the point
+                        * at which we consider the GPU to be idle as well.
+                        */
+                       accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+                       work_start_time_gpu = 0;
+               }
+       }
+
+       mali_utilization_data_unlock();
+}
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+       return mali_dvfs_policy_enabled();
+#else
+       return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+       MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+       mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+       accumulated_work_time_gpu = 0;
+       accumulated_work_time_gp = 0;
+       accumulated_work_time_pp = 0;
+
+       last_utilization_gpu = 0;
+       last_utilization_gp = 0;
+       last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(utilization_data_lock);
+}
+
+void mali_utilization_data_assert_locked(void)
+{
+       MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock);
+}
+
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+       return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+       return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+       return last_utilization_pp;
+}
diff --git a/utgard/r8p0/common/mali_kernel_utilization.h b/utgard/r8p0/common/mali_kernel_utilization.h
new file mode 100755 (executable)
index 0000000..5206225
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+mali_bool mali_utilization_enabled(void);
+
+/**
+ * Should be called when a GP job starts executing
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a GP job has completed executing
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a PP job starts executing
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a PP job has completed executing
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to calculate the GPU utilization
+ *
+ * @param start_time In/out: start of the period; updated to the current time
+ * @param time_period Out: length of the period that was measured
+ * @param need_add_timer Out: MALI_TRUE if the caller should re-arm the timer
+ */
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_data_assert_locked(void);
+
+void mali_utilization_reset(void);
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/utgard/r8p0/common/mali_kernel_vsync.c b/utgard/r8p0/common/mali_kernel_vsync.c
new file mode 100755 (executable)
index 0000000..3b2b108
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#include "mali_osk_profiling.h"
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+       _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+       MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+       /*
+        * Manually generate user space events in kernel space.
+        * This saves user space from calling kernel space twice in this case.
+        * We just need to remember to add pid and tid manually.
+        */
+       if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+       }
+
+       if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+       }
+
+       MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+       MALI_SUCCESS;
+}
+
diff --git a/utgard/r8p0/common/mali_l2_cache.c b/utgard/r8p0/common/mali_l2_cache.c
new file mode 100755 (executable)
index 0000000..60c3957
--- /dev/null
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+       MALI400_L2_CACHE_REGISTER_SIZE         = 0x0004,
+       MALI400_L2_CACHE_REGISTER_STATUS       = 0x0008,
+       /*unused                               = 0x000C */
+       MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010,
+       MALI400_L2_CACHE_REGISTER_CLEAR_PAGE   = 0x0014,
+       MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018,
+       MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+       MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
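+
+/* The values above are byte offsets from the L2 cache core's register base,
+ * used with mali_hw_core_register_read()/mali_hw_core_register_write(). */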
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+       MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable {
+       MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+       MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+       MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+       MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+       MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02,
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_NOT_SET -1
+
+static struct mali_l2_cache_core *
+       mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
+
+
+/* Local helper functions */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+       struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       _mali_osk_spinlock_irq_lock(cache->lock);
+}
+
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       _mali_osk_spinlock_irq_unlock(cache->lock);
+}
+
+/* Implementation of the L2 cache interface */
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+       _mali_osk_resource_t *resource, u32 domain_index)
+{
+       struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+       u32 cache_size;
+#endif
+
+       MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+                            resource->description));
+
+       if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
+               return NULL;
+       }
+
+       cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+       if (NULL == cache) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+               return NULL;
+       }
+
+       cache->core_id =  mali_global_num_l2s;
+       cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+       cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+       cache->counter_value0_base = 0;
+       cache->counter_value1_base = 0;
+       cache->pm_domain = NULL;
+       cache->power_is_on = MALI_FALSE;
+       cache->last_invalidated_id = 0;
+
+       if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+                       resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+               _mali_osk_free(cache);
+               return NULL;
+       }
+
+#if defined(DEBUG)
+       cache_size = mali_hw_core_register_read(&cache->hw_core,
+                                               MALI400_L2_CACHE_REGISTER_SIZE);
+       MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+                            resource->description,
+                            1 << (((cache_size >> 16) & 0xff) - 10),
+                            1 << ((cache_size >> 8) & 0xff),
+                            1 << (cache_size & 0xff),
+                            1 << ((cache_size >> 24) & 0xff)));
+#endif
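+
+       /*
+        * The SIZE register packs log2-encoded fields, as decoded in the
+        * debug print above: bits [7:0] log2(line size in bytes), [15:8]
+        * log2(associativity), [23:16] log2(cache size in bytes) and
+        * [31:24] log2(external bus width in bits).
+        */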
+
+       cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                       _MALI_OSK_LOCK_ORDER_L2);
+       if (NULL == cache->lock) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+                                 cache->hw_core.description));
+               mali_hw_core_delete(&cache->hw_core);
+               _mali_osk_free(cache);
+               return NULL;
+       }
+
+       /* register with correct power domain */
+       cache->pm_domain = mali_pm_register_l2_cache(
+                                  domain_index, cache);
+
+       mali_global_l2s[mali_global_num_l2s] = cache;
+       mali_global_num_l2s++;
+
+       return cache;
+}
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+       u32 i;
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               if (mali_global_l2s[i] != cache) {
+                       continue;
+               }
+
+               mali_global_l2s[i] = NULL;
+               mali_global_num_l2s--;
+
+               if (i == mali_global_num_l2s) {
+                       /* Removed last element, nothing more to do */
+                       break;
+               }
+
+               /*
+                * We removed a l2 cache from the middle of the array,
+                * so move the last l2 cache to current position
+                */
+               mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+               mali_global_l2s[mali_global_num_l2s] = NULL;
+
+               /* All good */
+               break;
+       }
+
+       _mali_osk_spinlock_irq_term(cache->lock);
+       mali_hw_core_delete(&cache->hw_core);
+       _mali_osk_free(cache);
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       mali_l2_cache_lock(cache);
+
+       mali_l2_cache_reset(cache);
+
+       if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask)
+               MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+       cache->power_is_on = MALI_TRUE;
+
+       mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       mali_l2_cache_lock(cache);
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
+
+       /*
+        * The HW counters will start from zero again when we resume,
+        * but we should report counters as always increasing.
+        * Take a copy of the HW values now in order to add this to
+        * the values we report after being powered up.
+        *
+        * The physical power off of the L2 cache might be outside our
+        * own control (e.g. runtime PM). That is why we must manually
+        * set the counter value to zero as well.
+        */
+
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+               cache->counter_value0_base += mali_hw_core_register_read(
+                                                     &cache->hw_core,
+                                                     MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+       }
+
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               cache->counter_value1_base += mali_hw_core_register_read(
+                                                     &cache->hw_core,
+                                                     MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+       }
+
+       cache->power_is_on = MALI_FALSE;
+
+       mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_core_set_counter_src(
+       struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+       u32 reg_offset_src;
+       u32 reg_offset_val;
+
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
+
+       mali_l2_cache_lock(cache);
+
+       if (0 == source_id) {
+               /* start counting from 0 */
+               cache->counter_value0_base = 0;
+               cache->counter_src0 = counter;
+               reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+               reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+       } else {
+               /* start counting from 0 */
+               cache->counter_value1_base = 0;
+               cache->counter_src1 = counter;
+               reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+               reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+       }
+
+       if (cache->power_is_on) {
+               u32 hw_src;
+
+               if (MALI_HW_CORE_NO_COUNTER != counter) {
+                       hw_src = counter;
+               } else {
+                       hw_src = 0; /* disable value for HW */
+               }
+
+               /* Set counter src */
+               mali_hw_core_register_write(&cache->hw_core,
+                                           reg_offset_src, hw_src);
+
+               /* Make sure the HW starts counting from 0 again */
+               mali_hw_core_register_write(&cache->hw_core,
+                                           reg_offset_val, 0);
+       }
+
+       mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_core_get_counter_values(
+       struct mali_l2_cache_core *cache,
+       u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT(NULL != src0);
+       MALI_DEBUG_ASSERT(NULL != value0);
+       MALI_DEBUG_ASSERT(NULL != src1);
+       MALI_DEBUG_ASSERT(NULL != value1);
+
+       mali_l2_cache_lock(cache);
+
+       *src0 = cache->counter_src0;
+       *src1 = cache->counter_src1;
+
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+               if (MALI_TRUE == cache->power_is_on) {
+                       *value0 = mali_hw_core_register_read(&cache->hw_core,
+                                                            MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+               } else {
+                       *value0 = 0;
+               }
+
+               /* Add base offset value (in case the core has been powered off) */
+               *value0 += cache->counter_value0_base;
+       }
+
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               if (MALI_TRUE == cache->power_is_on) {
+                       *value1 = mali_hw_core_register_read(&cache->hw_core,
+                                                            MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+               } else {
+                       *value1 = 0;
+               }
+
+               /* Add base offset value (in case the core has been powered off) */
+               *value1 += cache->counter_value1_base;
+       }
+
+       mali_l2_cache_unlock(cache);
+}
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+       if (mali_global_num_l2s > index) {
+               return mali_global_l2s[index];
+       }
+
+       return NULL;
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+       return mali_global_num_l2s;
+}
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       if (NULL == cache) {
+               return;
+       }
+
+       mali_l2_cache_lock(cache);
+
+       cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+                                  MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+       mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_conditional(
+       struct mali_l2_cache_core *cache, u32 id)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       if (NULL == cache) {
+               return;
+       }
+
+       /*
+        * If the last cache invalidation was done by a job with a higher id we
+        * don't have to flush. Since user space stores jobs with their
+        * corresponding memory in sequence (first job #0, then job #1, ...),
+        * we don't have to flush for job n-1 if job n has already invalidated
+        * the cache since we know for sure that job n-1's memory was already
+        * written when job n was started.
+        */
+
+       mali_l2_cache_lock(cache);
+
+       if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+               /* Set latest invalidated id to current "point in time" */
+               cache->last_invalidated_id =
+                       mali_scheduler_get_new_cache_order();
+               mali_l2_cache_send_command(cache,
+                                          MALI400_L2_CACHE_REGISTER_COMMAND,
+                                          MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+       }
+
+       mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_all(void)
+{
+       u32 i;
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               struct mali_l2_cache_core *cache = mali_global_l2s[i];
+               _mali_osk_errcode_t ret;
+
+               MALI_DEBUG_ASSERT_POINTER(cache);
+
+               mali_l2_cache_lock(cache);
+
+               if (MALI_TRUE != cache->power_is_on) {
+                       mali_l2_cache_unlock(cache);
+                       continue;
+               }
+
+               cache->last_invalidated_id =
+                       mali_scheduler_get_new_cache_order();
+
+               ret = mali_l2_cache_send_command(cache,
+                                                MALI400_L2_CACHE_REGISTER_COMMAND,
+                                                MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+               }
+
+               mali_l2_cache_unlock(cache);
+       }
+}
+
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+       u32 i;
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               struct mali_l2_cache_core *cache = mali_global_l2s[i];
+               u32 j;
+
+               MALI_DEBUG_ASSERT_POINTER(cache);
+
+               mali_l2_cache_lock(cache);
+
+               if (MALI_TRUE != cache->power_is_on) {
+                       mali_l2_cache_unlock(cache);
+                       continue;
+               }
+
+               for (j = 0; j < num_pages; j++) {
+                       _mali_osk_errcode_t ret;
+
+                       ret = mali_l2_cache_send_command(cache,
+                                                        MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+                                                        pages[j]);
+                       if (_MALI_OSK_ERR_OK != ret) {
+                               MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
+                       }
+               }
+
+               mali_l2_cache_unlock(cache);
+       }
+}
+
+/* -------- local helper functions below -------- */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+       /* Kasin: warn if the power domain is still off at this point. */
+       if (cache && cache->pm_domain && cache->pm_domain->power_is_on == MALI_FALSE) {
+               printk("===========%s, %d skip off power domain?\n", __FUNCTION__, __LINE__);
+       }
+
+       /* Invalidate cache (just to keep it in a known state at startup) */
+       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+                                  MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+       /* Enable cache */
+       mali_hw_core_register_write(&cache->hw_core,
+                                   MALI400_L2_CACHE_REGISTER_ENABLE,
+                                   (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+                                   (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+       if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_MAX_READS,
+                                           (u32)mali_l2_max_reads);
+       }
+
+       /* Restart any performance counters (if enabled) */
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+                                           cache->counter_src0);
+       }
+
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+                                           cache->counter_src1);
+       }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+       struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+       int i = 0;
+       const int loop_count = 100000;
+
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+       /*
+        * First, wait for L2 cache command handler to go idle.
+        * (Commands received while processing another command will be ignored)
+        */
+       for (i = 0; i < loop_count; i++) {
+               if (!(mali_hw_core_register_read(&cache->hw_core,
+                                                MALI400_L2_CACHE_REGISTER_STATUS) &
+                     (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+                       break;
+               }
+       }
+
+       if (i == loop_count) {
+               MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* then issue the command */
+       mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/utgard/r8p0/common/mali_l2_cache.h b/utgard/r8p0/common/mali_l2_cache.h
new file mode 100755 (executable)
index 0000000..de92ad6
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES  3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core {
+       /* Common HW core functionality */
+       struct mali_hw_core hw_core;
+
+       /* Synchronize L2 cache access */
+       _mali_osk_spinlock_irq_t *lock;
+
+       /* Unique core ID */
+       u32 core_id;
+
+       /* The power domain this L2 cache belongs to */
+       struct mali_pm_domain *pm_domain;
+
+       /* MALI_TRUE if power is on for this L2 cache */
+       mali_bool power_is_on;
+
+       /* A "timestamp" to avoid unnecessary flushes */
+       u32 last_invalidated_id;
+
+       /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32 counter_src0;
+
+       /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32 counter_src1;
+
+       /*
+        * Performance counter 0 value base/offset
+        * (allows accumulative reporting even after power off)
+        */
+       u32 counter_value0_base;
+
+       /*
+        * Performance counter 1 value base/offset
+        * (allows accumulative reporting even after power off)
+        */
+       u32 counter_value1_base;
+
+       /* Used by PM domains to link L2 caches of same domain */
+       _mali_osk_list_t pm_domain_list;
+};
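+
+/* The counter and power state fields above are protected by the core's
+ * spinlock; core_id and pm_domain are set at creation time and then
+ * read-only. */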
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+       _mali_osk_resource_t *resource, u32 domain_index);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->core_id;
+}
+
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+       struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+       struct mali_l2_cache_core *cache,
+       u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+struct mali_group *mali_l2_cache_get_group(
+       struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+void mali_l2_cache_invalidate_conditional(
+       struct mali_l2_cache_core *cache, u32 id);
+
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/utgard/r8p0/common/mali_mem_validation.c b/utgard/r8p0/common/mali_mem_validation.c
new file mode 100644 (file)
index 0000000..dddfb58
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+       u32 phys_base;        /**< Mali physical base of the memory, page aligned */
+       u32 size;             /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+       /* Check that no other MEM_VALIDATION resources exist */
+       if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+               MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Check restrictions on page alignment */
+       if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+           (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+               MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       mali_mem_validator.phys_base = start;
+       mali_mem_validator.size = size;
+       MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+                            mali_mem_validator.phys_base, mali_mem_validator.size));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
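+       /*
+        * NOTE: the range check below is compiled out (#if 0), so this
+        * function currently accepts any range and always returns
+        * _MALI_OSK_ERR_OK.
+        */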
+#if 0
+       if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+               if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+                   (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+                       if ((phys_addr          >= mali_mem_validator.phys_base) &&
+                           ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+                           (phys_addr          <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+                           ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+                               MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+                               return _MALI_OSK_ERR_OK;
+                       }
+               }
+       }
+
+       MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+       return _MALI_OSK_ERR_FAULT;
+#else
+       return _MALI_OSK_ERR_OK;
+#endif
+}
diff --git a/utgard/r8p0/common/mali_mem_validation.h b/utgard/r8p0/common/mali_mem_validation.h
new file mode 100644 (file)
index 0000000..f7e6cb1
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/utgard/r8p0/common/mali_mmu.c b/utgard/r8p0/common/mali_mmu.c
new file mode 100755 (executable)
index 0000000..c08ce1f
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command {
+       MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+       MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+       MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
+       MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+       MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+       MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+       MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static mali_dma_addr mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+       /* allocate the helper pages */
+       mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
+       if (0 == mali_empty_page_directory_phys) {
+               MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
+               mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                       &mali_page_fault_flush_page_directory_mapping,
+                       &mali_page_fault_flush_page_table,
+                       &mali_page_fault_flush_page_table_mapping,
+                       &mali_page_fault_flush_data_page,
+                       &mali_page_fault_flush_data_page_mapping)) {
+               MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
+               mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+               mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+               mali_empty_page_directory_virt = NULL;
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+       MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+       /* Free global helper pages */
+       mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+       mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+       mali_empty_page_directory_virt = NULL;
+
+       /* Free the page fault flush pages */
+       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                                      &mali_page_fault_flush_page_directory_mapping,
+                                      &mali_page_fault_flush_page_table,
+                                      &mali_page_fault_flush_page_table_mapping,
+                                      &mali_page_fault_flush_data_page,
+                                      &mali_page_fault_flush_data_page_mapping);
+}
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+       struct mali_mmu_core *mmu = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(resource);
+
+       MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+       mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+       if (NULL != mmu) {
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
+                       if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
+                               if (is_virtual) {
+                                       /* Skip reset and IRQ setup for virtual MMU */
+                                       return mmu;
+                               }
+
+                               if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       mmu->irq = _mali_osk_irq_init(resource->irq,
+                                                                     mali_group_upper_half_mmu,
+                                                                     group,
+                                                                     mali_mmu_probe_trigger,
+                                                                     mali_mmu_probe_ack,
+                                                                     mmu,
+                                                                     resource->description);
+                                       if (NULL != mmu->irq) {
+                                               return mmu;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+                                       }
+                               }
+                               mali_group_remove_mmu_core(group);
+                       } else {
+                               MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+                       }
+                       mali_hw_core_delete(&mmu->hw_core);
+               }
+
+               _mali_osk_free(mmu);
+       } else {
+               MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+       }
+
+       return NULL;
+}
+
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+       if (NULL != mmu->irq) {
+               _mali_osk_irq_term(mmu->irq);
+       }
+
+       mali_hw_core_delete(&mmu->hw_core);
+       _mali_osk_free(mmu);
+}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+       int i;
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable the stall on
+ * @return MALI_TRUE if the HW stall was successfully engaged, otherwise MALI_FALSE
+ * (the request timed out or the MMU is in page fault state)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+               MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+               return MALI_TRUE;
+       }
+
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+               MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+               return MALI_FALSE;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
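+       /* Poll until the stall engages (STALL_ACTIVE set, STALL_NOT_ACTIVE
+        * clear), a page fault becomes active (the stall is then implicit),
+        * or paging gets disabled in the meantime. */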
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+                       break;
+               }
+               if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+                       break;
+               }
+               if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return MALI_FALSE;
+       }
+
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+               MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+               return MALI_FALSE;
+       }
+
+       return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+       int i;
+       u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+       if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+               MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+               return;
+       }
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+               MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+               return;
+       }
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+               if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+                       break;
+               }
+               if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+                       break;
+               }
+               /* Paging may have been disabled while we waited; the stall is then moot. */
+               if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       }
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+       int i;
+
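+       /* Sanity check that DTE_ADDR is writable: the low 12 bits are not
+        * stored, since page directories are 4 KiB aligned, so 0xCAFEBABE is
+        * expected to read back as 0xCAFEB000. */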
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+       MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+               if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
+                       break;
+               }
+       }
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       stall_success = mali_mmu_enable_stall(mmu);
+       if (!stall_success) {
+               err = _MALI_OSK_ERR_BUSY;
+       }
+
+       MALI_DEBUG_PRINT(3, ("Mali MMU: mali_mmu_reset: %s\n", mmu->hw_core.description));
+
+       if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+               /* no session is active, so just activate the empty page directory */
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
+               mali_mmu_enable_paging(mmu);
+               err = _MALI_OSK_ERR_OK;
+       }
+       mali_mmu_disable_stall(mmu);
+
+       return err;
+}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+       mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+       if (MALI_FALSE == stall_success) {
+               /* MALI_FALSE means the MMU is in page fault state; the stall cannot be disabled in that case */
+               return MALI_FALSE;
+       }
+
+       mali_mmu_disable_stall(mmu);
+       return MALI_TRUE;
+}
+
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+       /* The MMU must be in stalled or page fault mode, for this writing to work */
+       MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+                               & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+       stall_success = mali_mmu_enable_stall(mmu);
+       MALI_DEBUG_ASSERT(stall_success);
+       MALI_IGNORE(stall_success);
+       mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+       mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
+{
+       mali_bool stall_success;
+
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+       MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+       stall_success = mali_mmu_enable_stall(mmu);
+
+       /* This function can only be called when the core is idle, so the stall request cannot fail. */
+       MALI_DEBUG_ASSERT(stall_success);
+       MALI_IGNORE(stall_success);
+
+       mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+       mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+       mali_bool stall_success;
+       MALI_DEBUG_ASSERT_POINTER(mmu);
+
+       MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+       stall_success = mali_mmu_enable_stall(mmu);
+       /* Enabling the stall is expected to fail here, since the MMU might be in page fault mode when this is called */
+       mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+       if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+extern int mali_page_fault;
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+       struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+       u32 int_stat;
+
+       int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+       MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+       if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+               MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+       } else {
+               MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+               mali_page_fault++;
+       }
+
+       if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+               MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+               mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+       } else {
+               MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+               mali_page_fault++;
+       }
+
+       if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+           (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+       MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/utgard/r8p0/common/mali_mmu.h b/utgard/r8p0/common/mali_mmu.h
new file mode 100755 (executable)
index 0000000..ac31c7a
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+       MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+       MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+       MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+       MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+       MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+       MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+       MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+       MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+       MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+       MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+       MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits {
+       MALI_MMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
+       MALI_MMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
+       MALI_MMU_STATUS_BIT_IDLE                = 1 << 3,
+       MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+       MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+       MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core {
+       struct mali_hw_core hw_core; /**< Common for all HW cores */
+       _mali_osk_irq_t *irq;        /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
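+/* Typical lifetime sketch (illustrative only; error handling omitted):
+ *
+ *   struct mali_mmu_core *mmu = mali_mmu_create(resource, group, MALI_FALSE);
+ *   // non-virtual MMUs are reset and get their IRQ handler inside create
+ *   mali_mmu_activate_page_directory(mmu, pagedir); // bind a session
+ *   mali_mmu_activate_empty_page_directory(mmu);    // unbind when idle
+ *   mali_mmu_delete(mmu);
+ */
+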
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+       u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+       if (0 == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       }
+
+       return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+       return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/utgard/r8p0/common/mali_mmu_page_directory.c b/utgard/r8p0/common/mali_mmu_page_directory.c
new file mode 100644 (file)
index 0000000..a752f5c
--- /dev/null
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+       _mali_osk_errcode_t err;
+       mali_io_address mapping;
+       mali_dma_addr address;
+
+       if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+               /* Allocation failed */
+               MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+               return 0;
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(mapping);
+
+       err = fill_page(mapping, 0);
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_mmu_release_table_page(address, mapping);
+               MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+               return 0;
+       }
+
+       *virt_addr = mapping;
+       return address;
+}
+
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
+{
+       if (MALI_INVALID_PAGE != address) {
+               mali_mmu_release_table_page(address, virt_addr);
+       }
+}
+
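+/* Builds a minimal three-level chain in which every PDE of the new page
+ * directory points at the single page table and every PTE points at the
+ * single data page, so that any GPU access resolves harmlessly while a page
+ * fault is being flushed. */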
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+       _mali_osk_errcode_t err;
+
+       err = mali_mmu_get_table_page(data_page, data_page_mapping);
+       if (_MALI_OSK_ERR_OK == err) {
+               err = mali_mmu_get_table_page(page_table, page_table_mapping);
+               if (_MALI_OSK_ERR_OK == err) {
+                       err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+                       if (_MALI_OSK_ERR_OK == err) {
+                               fill_page(*data_page_mapping, 0);
+                               fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+                               fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+                               MALI_SUCCESS;
+                       }
+                       mali_mmu_release_table_page(*page_table, *page_table_mapping);
+                       *page_table = MALI_INVALID_PAGE;
+               }
+               mali_mmu_release_table_page(*data_page, *data_page_mapping);
+               *data_page = MALI_INVALID_PAGE;
+       }
+       return err;
+}
+
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+       if (MALI_INVALID_PAGE != *page_directory) {
+               mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+               *page_directory = MALI_INVALID_PAGE;
+               *page_directory_mapping = NULL;
+       }
+
+       if (MALI_INVALID_PAGE != *page_table) {
+               mali_mmu_release_table_page(*page_table, *page_table_mapping);
+               *page_table = MALI_INVALID_PAGE;
+               *page_table_mapping = NULL;
+       }
+
+       if (MALI_INVALID_PAGE != *data_page) {
+               mali_mmu_release_table_page(*data_page, *data_page_mapping);
+               *data_page = MALI_INVALID_PAGE;
+               *data_page_mapping = NULL;
+       }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+       int i;
+       MALI_DEBUG_ASSERT_POINTER(mapping);
+
+       for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+               _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
+       }
+       _mali_osk_mem_barrier();
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       _mali_osk_errcode_t err;
+       mali_io_address pde_mapping;
+       mali_dma_addr pde_phys;
+       int i, page_count;
+       u32 start_address;
+       if (last_pde < first_pde) {
+               MALI_PRINT_ERROR((" last_pde < first_pde\n"));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       for (i = first_pde; i <= last_pde; i++) {
+               if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+                       /* Page table not present */
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+                       MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+                       err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+                       if (_MALI_OSK_ERR_OK != err) {
+                               MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+                               return err;
+                       }
+                       pagedir->page_entries_mapped[i] = pde_mapping;
+
+                       /* Update PDE, mark as present */
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+                                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+                       MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+               }
+
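+
+               /* Account for the 4 KiB pages this mapping adds to PDE i:
+                * partial counts for the first and last PDE, all 1024 entries
+                * for PDEs that are fully covered. */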
+               if (first_pde == last_pde) {
+                       pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
+               } else if (i == first_pde) {
+                       start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+                       page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
+                       pagedir->page_entries_usage_count[i] += page_count;
+               } else if (i == last_pde) {
+                       start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+                       page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
+                       pagedir->page_entries_usage_count[i] += page_count;
+               } else {
+                       pagedir->page_entries_usage_count[i] = 1024;
+               }
+       }
+       _mali_osk_write_mem_barrier();
+
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+       int i;
+       const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+       const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+       for (i = first_pte; i <= last_pte; i++) {
+               _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+       }
+}
+
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                      index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+       const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+       const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+       u32 left = size;
+       int i;
+       mali_bool pd_changed = MALI_FALSE;
+       u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+       u32 num_pages_inv = 0;
+       mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+       /* For all page directory entries in range. */
+       for (i = first_pde; i <= last_pde; i++) {
+               u32 size_in_pde, offset;
+
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+               MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+               /* Offset into page table, 0 if mali_address is 4MiB aligned */
+               offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+               if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+                       size_in_pde = left;
+               } else {
+                       size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+               }
+
+               pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;
+
+               /* If entire page table is unused, free it */
+               if (0 == pagedir->page_entries_usage_count[i]) {
+                       u32 page_phys;
+                       void *page_virt;
+                       MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+                       /* last reference removed, no need to zero out each PTE  */
+
+                       page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
+                       page_virt = pagedir->page_entries_mapped[i];
+                       pagedir->page_entries_mapped[i] = NULL;
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+
+                       mali_mmu_release_table_page(page_phys, page_virt);
+                       pd_changed = MALI_TRUE;
+               } else {
+                       MALI_DEBUG_ASSERT(num_pages_inv < 2);
+                       if (num_pages_inv < 2) {
+                               pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+                               num_pages_inv++;
+                       } else {
+                               invalidate_all = MALI_TRUE;
+                       }
+
+                       /* If part of the page table is still in use, zero the relevant PTEs */
+                       mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+               }
+
+               left -= size_in_pde;
+               mali_address += size_in_pde;
+       }
+       _mali_osk_write_mem_barrier();
+
+       /* L2 pages invalidation */
+       if (MALI_TRUE == pd_changed) {
+               MALI_DEBUG_ASSERT(num_pages_inv < 3);
+               if (num_pages_inv < 3) {
+                       pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+                       num_pages_inv++;
+               } else {
+                       invalidate_all = MALI_TRUE;
+               }
+       }
+
+       if (invalidate_all) {
+               mali_l2_cache_invalidate_all();
+       } else {
+               mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+       }
+
+       MALI_SUCCESS;
+}
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+       struct mali_page_directory *pagedir;
+       _mali_osk_errcode_t err;
+       mali_dma_addr phys;
+
+       pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+       if (NULL == pagedir) {
+               return NULL;
+       }
+
+       err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+       if (_MALI_OSK_ERR_OK != err) {
+               _mali_osk_free(pagedir);
+               return NULL;
+       }
+
+       pagedir->page_directory = (u32)phys;
+
+       /* Zero page directory */
+       fill_page(pagedir->page_directory_mapped, 0);
+
+       return pagedir;
+}
+
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+       const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+       int i;
+
+       /* Free referenced page tables and zero PDEs. */
+       for (i = 0; i < num_page_table_entries; i++) {
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+                               pagedir->page_directory_mapped,
+                               sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+                       mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                            i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+                       mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+               }
+       }
+       _mali_osk_write_mem_barrier();
+
+       /* Free the page directory page. */
+       mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+       _mali_osk_free(pagedir);
+}
+
+
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits)
+{
+       u32 end_address = mali_address + size;
+       u32 mali_phys = (u32)phys_address;
+
+       /* Map physical pages into MMU page tables */
+       for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
+               MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+               _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+                                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+                                               mali_phys | permission_bits);
+       }
+}
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
+{
+#if defined(DEBUG)
+       u32 pde_index, pte_index;
+       u32 pde, pte;
+
+       pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+       pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+       pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                    pde_index * sizeof(u32));
+
+
+       if (pde & MALI_MMU_FLAGS_PRESENT) {
+               u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+               pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+                                            pte_index * sizeof(u32));
+
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+                                    "\t\tPTE: %08x, page %08x is %s\n",
+                                    fault_addr, pte_addr, pte,
+                                    MALI_MMU_ENTRY_ADDRESS(pte),
+                                    pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+       } else {
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+                                    fault_addr, pde));
+       }
+#else
+       MALI_IGNORE(pagedir);
+       MALI_IGNORE(fault_addr);
+#endif
+}
+
+/* For instrumented */
+struct dump_info {
+       u32 buffer_left;
+       u32 register_writes_size;
+       u32 page_table_dump_size;
+       u32 *buffer;
+};
+
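+/* The dump is produced in two passes over the same walk: a sizing pass with
+ * info->buffer == NULL, which only accumulates register_writes_size and
+ * page_table_dump_size, and a fill pass that writes into the user-supplied
+ * buffer (see the two _mali_ukk_* entry points below). */
+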
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+       if (NULL != info) {
+               info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
+
+               if (NULL != info->buffer) {
+                       /* check that we have enough space */
+                       if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = where;
+                       info->buffer++;
+
+                       *info->buffer = what;
+                       info->buffer++;
+
+                       info->buffer_left -= sizeof(u32) * 2;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
+{
+       if (NULL != info) {
+               /* 4096 for the page and 4 bytes for the address */
+               const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+               const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+               const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+               info->page_table_dump_size += dump_size_in_bytes;
+
+               if (NULL != info->buffer) {
+                       if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+                       *info->buffer = phys_addr;
+                       info->buffer++;
+
+                       _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+                       info->buffer += page_size_in_elements;
+
+                       info->buffer_left -= dump_size_in_bytes;
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+       MALI_DEBUG_ASSERT_POINTER(pagedir);
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       if (NULL != pagedir->page_directory_mapped) {
+               int i;
+
+               MALI_CHECK_NO_ERROR(
+                       mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+               );
+
+               for (i = 0; i < 1024; i++) {
+                       if (NULL != pagedir->page_entries_mapped[i]) {
+                               MALI_CHECK_NO_ERROR(
+                                       mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+                                                          _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                                          i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+                               );
+                       }
+               }
+       }
+
+       MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+       MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+                                    "set the page directory address", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
+       MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data *session_data;
+
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+       args->size = info.register_writes_size + info.page_table_dump_size;
+       MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
+{
+       struct dump_info info = { 0, 0, 0, NULL };
+       struct mali_session_data *session_data;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
+
+       info.buffer_left = args->size;
+       info.buffer = (u32 *)(uintptr_t)args->buffer;
+
+       args->register_writes = (uintptr_t)info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+       args->page_table_dump = (uintptr_t)info.buffer;
+       MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+       args->register_writes_size = info.register_writes_size;
+       args->page_table_dump_size = info.page_table_dump_size;
+
+       MALI_SUCCESS;
+}
diff --git a/utgard/r8p0/common/mali_mmu_page_directory.h b/utgard/r8p0/common/mali_mmu_page_directory.h
new file mode 100755 (executable)
index 0000000..4fc1058
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
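+/* Worked example: for the Mali virtual address 0x12345678,
+ *   MALI_MMU_PDE_ENTRY(0x12345678) == 0x048  (bits 31..22)
+ *   MALI_MMU_PTE_ENTRY(0x12345678) == 0x345  (bits 21..12)
+ * and the low 12 bits (0x678) are the byte offset inside the 4 KiB page. */
+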
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page table entry flags
+ * Flag bits stored in the low bits of a PDE/PTE; MALI_MMU_FLAGS_MASK covers
+ * them all.
+ */
+typedef enum mali_mmu_entry_flags {
+       MALI_MMU_FLAGS_PRESENT = 0x01,
+       MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+       MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+       MALI_MMU_FLAGS_OVERRIDE_CACHE  = 0x8,
+       MALI_MMU_FLAGS_WRITE_CACHEABLE  = 0x10,
+       MALI_MMU_FLAGS_WRITE_ALLOCATE  = 0x20,
+       MALI_MMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
+       MALI_MMU_FLAGS_READ_CACHEABLE  = 0x80,
+       MALI_MMU_FLAGS_READ_ALLOCATE  = 0x100,
+       MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+               MALI_MMU_FLAGS_PRESENT | \
+               MALI_MMU_FLAGS_READ_PERMISSION |  \
+               MALI_MMU_FLAGS_WRITE_PERMISSION | \
+               MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+               MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+               MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+               MALI_MMU_FLAGS_READ_CACHEABLE | \
+               MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+                                MALI_MMU_FLAGS_PRESENT | \
+                                MALI_MMU_FLAGS_READ_PERMISSION |  \
+                                MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+       u32 page_directory; /**< Physical address of the memory session's page directory */
+       mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+       mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory, mapped into the kernel's address space */
+       u32   page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range)  */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits);
+
+u32 mali_allocate_empty_page(mali_io_address *virtual);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/utgard/r8p0/common/mali_osk.h b/utgard/r8p0/common/mali_osk.h
new file mode 100755 (executable)
index 0000000..5c20fc3
--- /dev/null
@@ -0,0 +1,1389 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include <linux/seq_file.h>
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h"           /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+#define _mali_osk_ctxprintf     seq_printf
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ *   ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * when ptr is a pointer to an object of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+       ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
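+/* Example: struct mali_mmu_core (added in this change) embeds a
+ * struct mali_hw_core member named 'hw_core'.  Given only a pointer to that
+ * member, the containing object can be recovered with:
+ *
+ *   struct mali_mmu_core *mmu =
+ *           _MALI_OSK_CONTAINER_OF(core_ptr, struct mali_mmu_core, hw_core);
+ */
+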
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores.  Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We do signal a lot of other (low priority) work also as part of the job being finished, and if we
+ * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs
+ * random things instead of starting the next GPU job when the GPU is idle.  So setting the gpu
+ * scheduler to high priority does give a visually more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
+
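+/* Minimal usage sketch (hypothetical handler name, for illustration):
+ *
+ *   static void my_bottom_half(void *data) { ...deferred processing... }
+ *
+ *   _mali_osk_wq_work_t *w = _mali_osk_wq_create_work(my_bottom_half, ctx);
+ *   _mali_osk_wq_schedule_work(w);  // my_bottom_half(ctx) runs later
+ *   _mali_osk_wq_delete_work(w);    // flushes the queue, then frees
+ */
+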
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * This function is the same as \a _mali_osk_wq_schedule_work(), with the only
+ * difference that it runs at high (real-time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+*/
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer is started, and \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to an ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
+
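+/* Usage sketch, mirroring mali_mmu_create() elsewhere in this change:
+ *
+ *   mmu->irq = _mali_osk_irq_init(resource->irq, mali_group_upper_half_mmu,
+ *                                 group, mali_mmu_probe_trigger,
+ *                                 mali_mmu_probe_ack, mmu,
+ *                                 resource->description);
+ *   if (NULL == mmu->irq) { ...clean up and fail... }
+ */
+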
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ */
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new value to an atomic counter, and return the old value
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value to assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
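+
+/* Usage sketch (editor's illustration): maintaining a simple job counter with
+ * the atomic API; the variable name example_jobs is hypothetical.
+ *
+ * @code
+ * static _mali_osk_atomic_t example_jobs;
+ *
+ * _mali_osk_atomic_init(&example_jobs, 0);
+ * _mali_osk_atomic_inc(&example_jobs);             // a job was submitted
+ * if (0 == _mali_osk_atomic_dec_return(&example_jobs)) {
+ *         // the last outstanding job has completed
+ * }
+ * _mali_osk_atomic_term(&example_jobs);
+ * @endcode
+ */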
+/** @} */  /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
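+
+/* Illustrative sketch (editor's addition): the typical allocate/use/free
+ * pattern; example_ctx_t is a hypothetical structure type.
+ *
+ * @code
+ * example_ctx_t *ctx = _mali_osk_calloc(1, sizeof(*ctx)); // zero-initialized
+ * if (NULL == ctx) {
+ *         return _MALI_OSK_ERR_NOMEM; // assuming this error code is in scope
+ * }
+ * ...
+ * _mali_osk_free(ctx);
+ * @endcode
+ */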
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation, which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
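+
+/* Illustrative sketch (editor's addition): mapping a register bank and
+ * accessing it through the 32-bit accessors. EXAMPLE_PHYS and EXAMPLE_SIZE
+ * are hypothetical, page-aligned platform constants.
+ *
+ * @code
+ * mali_io_address regs = _mali_osk_mem_mapioregion(EXAMPLE_PHYS, EXAMPLE_SIZE,
+ *                                                  "example registers");
+ * if (NULL != regs) {
+ *         u32 status = _mali_osk_mem_ioread32(regs, 0x00);
+ *         _mali_osk_mem_iowrite32(regs, 0x04, status | 1);
+ *         _mali_osk_mem_unmapioregion(EXAMPLE_PHYS, EXAMPLE_SIZE, regs);
+ * }
+ * @endcode
+ */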
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications that
+ * the different subsystems want to send to user space have to be queued for
+ * later reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after jobs have run on the hardware, or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the number
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue, which can be used to queue messages for user
+ * space delivery and to retrieve queued messages from.
+ *
+ * The queue is a FIFO, and has no restrictions on the number of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
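+
+/* Usage sketch (editor's illustration): one sender queues a notification and
+ * one receiver consumes and deletes it. EXAMPLE_TYPE and example_payload_t
+ * are hypothetical.
+ *
+ * @code
+ * _mali_osk_notification_queue_t *queue = _mali_osk_notification_queue_init();
+ *
+ * // sender side
+ * _mali_osk_notification_t *n =
+ *         _mali_osk_notification_create(EXAMPLE_TYPE, sizeof(example_payload_t));
+ * if (NULL != n) {
+ *         // fill in ((example_payload_t *)n->result_buffer), then queue it
+ *         _mali_osk_notification_queue_send(queue, n);
+ * }
+ *
+ * // receiver side (may sleep until a notification arrives)
+ * _mali_osk_notification_t *received = NULL;
+ * if (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive(queue, &received)) {
+ *         // handle received->result_buffer, then release the object
+ *         _mali_osk_notification_delete(received);
+ * }
+ * @endcode
+ */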
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0, the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks from now at which
+ * this timer should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
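+
+/* Usage sketch (editor's illustration): a full timer lifecycle; the names
+ * example_timeout and example_data are hypothetical, and the callback is
+ * assumed to take the registered data pointer as its only argument.
+ *
+ * @code
+ * static void example_timeout(void *data)
+ * {
+ *         // runs when the timer expires
+ * }
+ *
+ * _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ * _mali_osk_timer_setcallback(tim, example_timeout, example_data);
+ * _mali_osk_timer_add(tim, _mali_osk_time_mstoticks(100));
+ * ...
+ * _mali_osk_timer_del(tim);  // stop, blocking until the callback is done
+ * _mali_osk_timer_term(tim); // then release the timer's resources
+ * @endcode
+ */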
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time functions use the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be lossless,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after or at the same time as tickb
+ *
+ * Systems where ticks can wrap must handle that.
+ *
+ * @param ticka the first tick count
+ * @param tickb the second tick count
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
+ */
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+unsigned long _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+unsigned long _mali_osk_time_tickcount(void);
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this must be implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return time in nanoseconds, since any given reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+/** @brief Return time in nanoseconds, since boot time.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
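+
+/* Illustrative sketch (editor's addition): measuring an elapsed interval as a
+ * tick delta and converting it to milliseconds; as noted above, the
+ * conversion is implementation dependent.
+ *
+ * @code
+ * unsigned long start = _mali_osk_time_tickcount();
+ * ...
+ * unsigned long elapsed_ticks = _mali_osk_time_tickcount() - start;
+ * u32 elapsed_ms = _mali_osk_time_tickstoms(elapsed_ticks);
+ * @endcode
+ */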
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
+
+/** @brief Find last (most-significant) bit set
+ *
+ * @param val 32-bit word to find the last set bit in
+ * @return the last (most-significant) set bit.
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. Will return once the
+ * timeout has been exceeded, even if the condition is still false.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
+
+/** @brief Terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
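+
+/* Usage sketch (editor's illustration): one thread sleeps until another sets
+ * a flag; struct example_state and example_flag_set are hypothetical.
+ *
+ * @code
+ * struct example_state { volatile mali_bool done; };
+ *
+ * static mali_bool example_flag_set(void *data)
+ * {
+ *         return ((struct example_state *)data)->done;
+ * }
+ *
+ * // waiter: sleeps until example_flag_set(&state) returns MALI_TRUE
+ * _mali_osk_wait_queue_wait_event(queue, example_flag_set, &state);
+ *
+ * // signaller: set state.done = MALI_TRUE, then wake the waiters
+ * _mali_osk_wait_queue_wake_up(queue);
+ * @endcode
+ */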
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return the name of the calling process.
+ *
+ * @return Name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
+ *
+ * When this function returns successfully, Mali is powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
+
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
+ *
+ * Mali might not yet be on after this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
+
+/** @brief Release the reference to the external power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores can be powered off.
+ *
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
+ */
+void _mali_osk_pm_dev_ref_put(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
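+
+/* Usage sketch (editor's illustration): bracketing hardware access with a
+ * synchronous power reference.
+ *
+ * @code
+ * if (_MALI_OSK_ERR_OK == _mali_osk_pm_dev_ref_get_sync()) {
+ *         // Mali is powered on here; safe to access the hardware
+ *         _mali_osk_pm_dev_ref_put();
+ * }
+ * @endcode
+ */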
+
+/** @} */ /* end group  _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_bitmap OSK Bitmap
+ * @{ */
+
+/** @brief Allocate a unique number from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return A unique number allocated from the bitmap object.
+ */
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Free an integer back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj A number previously allocated from the bitmap object.
+ */
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj);
+
+/** @brief Allocate a block of consecutive numbers from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param cnt The number of consecutive values to allocate.
+ * @return The start number of the allocated block.
+ */
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt);
+
+/** @brief Free a block of consecutive numbers back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj Start number of the block.
+ * @param cnt The size of the block.
+ */
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt);
+
+/** @brief Return the count still available for allocation in the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return The number of values still available for allocation.
+ */
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Initialize a bitmap object.
+ *
+ * @param bitmap A pointer to an uninitialized bitmap object.
+ * @param num Size of the bitmap object; this determines how much memory is allocated.
+ * @param reserve The first number available for allocation; values below it are reserved.
+ */
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve);
+
+/** @brief Free the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ */
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap);
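+
+/* Usage sketch (editor's illustration): allocating and releasing IDs from a
+ * bitmap. The sizes are hypothetical, and a zero return from
+ * _mali_osk_bitmap_init() is assumed to indicate success.
+ *
+ * @code
+ * struct _mali_osk_bitmap ids;
+ *
+ * if (0 == _mali_osk_bitmap_init(&ids, 128, 1)) { // IDs 1..127; 0 reserved
+ *         u32 id = _mali_osk_bitmap_alloc(&ids);
+ *         ...
+ *         _mali_osk_bitmap_free(&ids, id);
+ *         _mali_osk_bitmap_term(&ids);
+ * }
+ * @endcode
+ */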
+/** @} */ /* end group  _mali_osk_bitmap */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/utgard/r8p0/common/mali_osk_bitops.h b/utgard/r8p0/common/mali_osk_bitops.h
new file mode 100755 (executable)
index 0000000..61e0a91
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+       MALI_DEBUG_ASSERT(bit < 32);
+       MALI_DEBUG_ASSERT(NULL != addr);
+
+       (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+       MALI_DEBUG_ASSERT(bit < 32);
+       MALI_DEBUG_ASSERT(NULL != addr);
+
+       (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+       MALI_DEBUG_ASSERT(bit < 32);
+       return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+       u32 inverted;
+       u32 negated;
+       u32 isolated;
+       u32 leading_zeros;
+
+       /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+       inverted = ~value; /* zzz...z1000...0 */
+       /* Using count_trailing_zeros on inverted value -
+        * See ARM System Developers Guide for details of count_trailing_zeros */
+
+       /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+       negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+       /* negated = xxx...x1000...0 */
+
+       isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+       /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+        * Note that the output is zero if value was all 1s */
+
+       leading_zeros = _mali_osk_clz(isolated);
+
+       return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+       _mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+       _mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+       addr += nr >> 5; /* find the correct word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+       return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+       u32 total;
+
+       for (total = 0; total < maxbit; total += 32, ++addr) {
+               int result;
+               result = _mali_internal_find_first_zero_bit(*addr);
+
+               /* non-negative signifies the bit was found */
+               if (result >= 0) {
+                       total += (u32)result;
+                       break;
+               }
+       }
+
+       /* Now check if we reached maxbit or above */
+       if (total >= maxbit) {
+               total = maxbit;
+       }
+
+       return total; /* either the found bit nr, or maxbit if not found */
+}
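+
+/* Usage sketch (editor's illustration): finding and claiming a free slot in a
+ * small bit-array; the array name example_slots is hypothetical.
+ *
+ * @code
+ * u32 example_slots[2] = { 0xFFFFFFFF, 0x0000FFFF }; // bits 0..47 are set
+ * u32 free_bit = _mali_osk_find_first_zero_bit(example_slots, 64);
+ * if (free_bit < 64) {
+ *         _mali_osk_set_nonatomic_bit(free_bit, example_slots); // claims bit 48
+ * }
+ * @endcode
+ */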
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/utgard/r8p0/common/mali_osk_list.h b/utgard/r8p0/common/mali_osk_list.h
new file mode 100755 (executable)
index 0000000..a41f5ab
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+       next->prev = new_entry;
+       new_entry->next = next;
+       new_entry->prev = prev;
+       prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+       next->prev = prev;
+       prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+       list->next = list;
+       list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+       __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+       __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+       __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+       __mali_osk_list_del(list->prev, list->next);
+       _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+       return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+       __mali_osk_list_del(move_entry->prev, move_entry->next);
+       _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+       if (!_mali_osk_list_empty(old_list)) {
+               new_list->next = old_list->next;
+               new_list->prev = old_list->prev;
+               new_list->next->prev = new_list;
+               new_list->prev->next = new_list;
+               old_list->next = old_list;
+               old_list->prev = old_list;
+       }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+       _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member)         \
+       for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member),      \
+            tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member);  \
+            &ptr->member != (list);                                      \
+            ptr = tmp,                                                   \
+            tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+
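+/* Usage sketch (editor's illustration): safely draining a list of hypothetical
+ * example_job_t items, each embedding a _mali_osk_list_t member named 'link'.
+ *
+ * @code
+ * typedef struct example_job {
+ *         _mali_osk_list_t link;
+ *         u32 id;
+ * } example_job_t;
+ *
+ * _MALI_OSK_LIST_HEAD_STATIC_INIT(pending_list);
+ *
+ * example_job_t *job, *tmp;
+ * _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &pending_list, example_job_t, link) {
+ *         _mali_osk_list_del(&job->link); // safe: tmp was saved beforehand
+ *         // process or free job here
+ * }
+ * @endcode
+ */
+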
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+       for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member),      \
+            tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member);  \
+            &ptr->member != (list);                                      \
+            ptr = tmp,                                                   \
+            tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/utgard/r8p0/common/mali_osk_mali.h b/utgard/r8p0/common/mali_osk_mali.h
new file mode 100755 (executable)
index 0000000..f242cd4
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device {
+       struct device *dev;
+#ifdef CONFIG_HAVE_CLK
+       struct clk *clock;
+#endif
+#ifdef CONFIG_REGULATOR
+       struct regulator *regulator;
+#endif
+#ifdef CONFIG_PM_DEVFREQ
+       struct devfreq_dev_profile devfreq_profile;
+       struct devfreq *devfreq;
+       unsigned long current_freq;
+       unsigned long current_voltage;
+#ifdef CONFIG_DEVFREQ_THERMAL
+       struct thermal_cooling_device *devfreq_cooling;
+#endif
+#endif
+       struct mali_pm_metrics_data mali_metrics;
+};
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+#ifdef CONFIG_MALI_DT
+/** @brief Initialize the device resources when using device tree
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return The base address of the Mali GPU component with the lowest address if resources are found, otherwise 0.
+ */
+uintptr_t _mali_osk_resource_base_address(void);
+
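+/* Usage sketch (editor's illustration): locating one resource relative to
+ * the GPU base address. MALI_OFFSET_PMU and setup_pmu() are assumed names
+ * for this example, not definitions from this header:
+ *
+ *     _mali_osk_resource_t res;
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ *                 _mali_osk_resource_base_address() + MALI_OFFSET_PMU, &res))
+ *             setup_pmu(res.base, res.irq);  (hypothetical consumer)
+ */
+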
+/** @brief Find the specific GPU resource.
+ *
+ * @return value
+ * 0x400 if Mali 400 specific GPU resource identified
+ * 0x450 if Mali 450 specific GPU resource identified
+ * 0x470 if Mali 470 specific GPU resource identified
+ *
+ */
+u32 _mali_osk_identify_gpu_resource(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+
+/** @brief Find the pmu domain config from device data.
+ *
+ * @param domain_config_array Storage for the PMU domain config found in device data.
+ * @param array_size The size of the domain_config_array array.
+ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
+
+/** @brief Get Mali PMU switch delay
+ *
+ * @return The PMU switch delay if it is configured
+ */
+u32 _mali_osk_get_pmu_switch_delay(void);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @brief Initialize the gpu secure mode.
+ * The gpu secure mode will initially be in a disabled state.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void);
+
+/** @brief Deinitialize the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void);
+
+/** @brief Reset GPU and enable the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void);
+
+/** @brief Reset GPU and disable the gpu secure mode.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void);
+
+/** @brief Check if the gpu secure mode has been enabled.
+ * @return MALI_TRUE if enabled, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_gpu_secure_mode_is_enabled(void);
+
+/** @brief Check if the gpu secure mode is supported.
+ * @return MALI_TRUE if supported, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_gpu_secure_mode_is_supported(void);
+
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/utgard/r8p0/common/mali_osk_profiling.h b/utgard/r8p0/common/mali_osk_profiling.h
new file mode 100755 (executable)
index 0000000..eca6ad4
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/**
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Stop the profile sampling operation.
+ */
+void _mali_osk_profiling_stop_sampling(u32 pid);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ */
+void    _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
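+/* Usage sketch (editor's illustration): recording a single software event.
+ * The constant names below follow the pattern of mali_profiling_events.h
+ * and are assumptions here:
+ *
+ *     _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ *                                   MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ *                                   MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE,
+ *                                   0, 0, 0, 0, 0);
+ */
+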
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieve the number of recorded events
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start at 0 and increment until this function fails, in order to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
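+/* Retrieval sketch (editor's illustration): draining the capture buffer by
+ * incrementing the index until the call fails, as described above:
+ *
+ *     u64 timestamp;
+ *     u32 event_id, data[5], i = 0;
+ *     while (_MALI_OSK_ERR_OK == _mali_osk_profiling_get_event(
+ *                    i, &timestamp, &event_id, data))
+ *             i++;
+ */
+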
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING)  && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/utgard/r8p0/common/mali_osk_types.h b/utgard/r8p0/common/mali_osk_types.h
new file mode 100755 (executable)
index 0000000..6e9a133
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char      u8;
+typedef signed char        s8;
+typedef unsigned short     u16;
+typedef signed short       s16;
+typedef unsigned int       u32;
+typedef signed int         s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+  */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER     ((u32)-1)
+
+
+#define MALI_S32_MAX 0x7fffffff
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface return certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+       _MALI_OSK_ERR_OK = 0, /**< Success. */
+       _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+       _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+       _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+       _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+       _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+       _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptible mutex is interrupted. Ignore otherwise. */
+       _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+       _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+       _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
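+/* Translation sketch (editor's illustration): how a Linux-side OSK
+ * implementation might map native errno values onto these codes, as the
+ * paragraph above requires. map_errno() is hypothetical:
+ *
+ *     static _mali_osk_errcode_t map_errno(int err)
+ *     {
+ *             switch (err) {
+ *             case 0:            return _MALI_OSK_ERR_OK;
+ *             case -ENOMEM:      return _MALI_OSK_ERR_NOMEM;
+ *             case -ETIMEDOUT:   return _MALI_OSK_ERR_TIMEOUT;
+ *             case -ERESTARTSYS: return _MALI_OSK_ERR_RESTARTSYSCALL;
+ *             default:           return _MALI_OSK_ERR_FAULT;
+ *             }
+ *     }
+ */
+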
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+       union {
+               u32 val;
+               void *obj;
+       } u;
+} _mali_osk_atomic_t;
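+
+/* Usage sketch (editor's illustration), assuming the _mali_osk_atomic_init/
+ * inc/dec_return/term accessors declared in mali_osk.h:
+ *
+ *     _mali_osk_atomic_t refs;
+ *     _mali_osk_atomic_init(&refs, 1);           (creator's reference)
+ *     _mali_osk_atomic_inc(&refs);               (extra user)
+ *     ...
+ *     if (0 == _mali_osk_atomic_dec_return(&refs))
+ *             release_object();                  (hypothetical, last ref)
+ */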
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ *   allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ *   is not allowed.
+ *
+ */
+typedef enum {
+       /*  ||    Locks    ||  */
+       /*  ||   must be   ||  */
+       /* _||_  taken in _||_ */
+       /* \  /    this   \  / */
+       /*  \/    order!   \/  */
+
+       _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+       _MALI_OSK_LOCK_ORDER_SESSIONS,
+       _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+       _MALI_OSK_LOCK_ORDER_MEM_INFO,
+       _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+       _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+       _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+       _MALI_OSK_LOCK_ORDER_EXECUTOR,
+       _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER,
+       _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+       _MALI_OSK_LOCK_ORDER_PROFILING,
+       _MALI_OSK_LOCK_ORDER_L2,
+       _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+       _MALI_OSK_LOCK_ORDER_UTILIZATION,
+       _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+       _MALI_OSK_LOCK_ORDER_PM_STATE,
+
+       _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
+
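+/* Ordering sketch (editor's illustration): locks must be taken in
+ * increasing order. With the values above, a thread may do
+ *
+ *     take(_MALI_OSK_LOCK_ORDER_SCHEDULER);
+ *     take(_MALI_OSK_LOCK_ORDER_PROFILING);    (higher order: allowed)
+ *
+ * but taking _MALI_OSK_LOCK_ORDER_MEM_SESSION afterwards would violate the
+ * rule, since its order is lower than the highest-order lock already held.
+ * take() stands in for the _mali_osk_mutex_wait()-style primitives here.
+ */
+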
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+       _MALI_OSK_LOCKFLAG_UNORDERED        = 0x1, /**< Indicate that the order of this lock should not be checked */
+       _MALI_OSK_LOCKFLAG_ORDERED          = 0x2,
+       /** @enum _mali_osk_lock_flags_t
+        *
+        * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks when we call
+ * the functions _mali_osk_mutex_rw_init/wait/signal/term(). In this case, the RO mode can
+ * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait/signal().
+ *
+ */
+typedef enum {
+       _MALI_OSK_LOCKMODE_UNDEF = -1,  /**< Undefined lock mode. For internal use only */
+       _MALI_OSK_LOCKMODE_RW    = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+       _MALI_OSK_LOCKMODE_RO,          /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+       /** @enum _mali_osk_lock_mode_t
+        *
+        * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address *mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes.               */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
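+
+/* Mask sketch (editor's illustration): for a physical address 0x10234567,
+ * (addr & _MALI_OSK_CPU_PAGE_MASK) gives the page start 0x10234000, while
+ * (addr & ~_MALI_OSK_CPU_PAGE_MASK) gives the in-page offset 0x567.
+ */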
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT
+/** Mali Page Size, in bytes.               */
+#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+       _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+       u32 notification_type;   /**< The notification type */
+       u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+       void *result_buffer;    /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+       struct _mali_osk_list_s *next;
+       struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+       const char *description;        /**< short description of the resource */
+       uintptr_t base;                 /**< Physical base address of the resource, as seen by Mali resources. */
+       const char *irq_name;           /**< Name of the IRQ belonging to this resource */
+       u32 irq;                        /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group oskapi */
+
+/** @} */ /* end group uddapi */
+
+/** @brief Mali print ctx type which uses seq_file
+  */
+typedef struct seq_file _mali_osk_print_ctx;
+
+#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1
+
+typedef struct _mali_osk_bitmap {
+       u32         reserve;
+       u32         last;
+       u32         max;
+       u32         avail;
+       _mali_osk_spinlock_t   *lock;
+       unsigned long          *table;
+} _mali_osk_bitmap_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/utgard/r8p0/common/mali_pm.c b/utgard/r8p0/common/mali_pm.c
new file mode 100755 (executable)
index 0000000..1ef03a6
--- /dev/null
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * Domains which are marked as powered on (protected by pm_lock_exec).
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       1 << MALI_DOMAIN_INDEX_DUMMY
+};
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store the masks for every possible PP core count: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel does not enable PM_RUNTIME, keep this flag always true,
+ * since the GPU will then never be powered off by runtime PM. */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+       _mali_osk_errcode_t err;
+       struct mali_pmu_core *pmu;
+
+       pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                       _MALI_OSK_LOCK_ORDER_PM_STATE);
+       if (NULL == pm_lock_state) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                                           _MALI_OSK_LOCK_ORDER_PM_STATE);
+       if (NULL == pm_lock_exec) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+       if (NULL == pm_work) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pmu = mali_pmu_get_global_pmu_core();
+       if (NULL != pmu) {
+               /*
+                * We have a Mali PMU, set the correct domain
+                * configuration (default or custom)
+                */
+
+               u32 registered_cores_mask;
+
+               mali_pm_set_pmu_domain_config();
+
+               registered_cores_mask = mali_pm_get_registered_cores_mask();
+               mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+               MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+       }
+
+       /* Create all power domains needed (at least one dummy domain) */
+       err = mali_pm_create_pm_domains();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_terminate();
+               return err;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+       if (NULL != pm_work) {
+               _mali_osk_wq_delete_work(pm_work);
+               pm_work = NULL;
+       }
+
+       mali_pm_domain_terminate();
+
+       if (NULL != pm_lock_exec) {
+               _mali_osk_mutex_term(pm_lock_exec);
+               pm_lock_exec = NULL;
+       }
+
+       if (NULL != pm_lock_state) {
+               _mali_osk_spinlock_irq_term(pm_lock_state);
+               pm_lock_state = NULL;
+       }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+               struct mali_l2_cache_core *l2_cache)
+{
+       struct mali_pm_domain *domain;
+
+       domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+       if (NULL == domain) {
+               MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+               domain = mali_pm_domain_get_from_index(
+                                MALI_DOMAIN_INDEX_DUMMY);
+               domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+       } else {
+               MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+       }
+
+       MALI_DEBUG_ASSERT(NULL != domain);
+
+       mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+       return domain; /* return the actual domain this was registered in */
+}
+
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+               struct mali_group *group)
+{
+       struct mali_pm_domain *domain;
+
+       domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+       if (NULL == domain) {
+               MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+               domain = mali_pm_domain_get_from_index(
+                                MALI_DOMAIN_INDEX_DUMMY);
+               domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+       } else {
+               MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+       }
+
+       MALI_DEBUG_ASSERT(NULL != domain);
+
+       mali_pm_domain_add_group(domain, group);
+
+       return domain; /* return the actual domain this was registered in */
+}
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+                                 struct mali_group **groups,
+                                 u32 num_domains)
+{
+       mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+       u32 i;
+
+       mali_pm_state_lock();
+
+       for (i = 0; i < num_domains; i++) {
+               MALI_DEBUG_ASSERT_POINTER(domains[i]);
+               pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+               if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+                       /*
+                        * Tell caller that the corresponding group
+                        * was not already powered on.
+                        */
+                       ret = MALI_FALSE;
+               } else {
+                       /*
+                        * There is a time gap between powering on the domain and
+                        * setting the power state of the corresponding groups to on.
+                        */
+                       if (NULL != groups[i] &&
+                           MALI_FALSE == mali_group_power_is_on(groups[i])) {
+                               ret = MALI_FALSE;
+                       }
+               }
+       }
+
+       MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+       mali_pm_state_unlock();
+
+       return ret;
+}
+
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+                                 u32 num_domains)
+{
+       u32 mask = 0;
+       mali_bool ret;
+       u32 i;
+
+       mali_pm_state_lock();
+
+       for (i = 0; i < num_domains; i++) {
+               MALI_DEBUG_ASSERT_POINTER(domains[i]);
+               mask |= mali_pm_domain_ref_put(domains[i]);
+       }
+
+       if (0 == mask) {
+               /* return false, all domains should still stay on */
+               ret = MALI_FALSE;
+       } else {
+               /* Assert that we are dealing with a change */
+               MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+               /* Update our desired domain mask */
+               pd_mask_wanted &= ~mask;
+
+               /* return true; one or more domains can now be powered down */
+               ret = MALI_TRUE;
+       }
+
+       MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+       mali_pm_state_unlock();
+
+       return ret;
+}
+
+void mali_pm_init_begin(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       _mali_osk_pm_dev_ref_get_sync();
+
+       /* Ensure all PMU domains are on */
+       if (NULL != pmu) {
+               mali_pmu_power_up_all(pmu);
+       }
+}
+
+void mali_pm_init_end(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       /* Ensure all PMU domains are off */
+       if (NULL != pmu) {
+               mali_pmu_power_down_all(pmu);
+       }
+
+       _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+       mali_pm_exec_lock();
+
+       if (MALI_TRUE == mali_pm_runtime_active) {
+               /*
+                * Only update if GPU is powered on.
+                * Deactivation of the last group will result in both a
+                * deferred runtime PM suspend operation and
+                * deferred execution of this function.
+                * mali_pm_runtime_active will be false if runtime PM
+                * executed first and thus the GPU is now fully powered off.
+                */
+               mali_pm_update_sync_internal();
+       }
+
+       mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+       _mali_osk_wq_schedule_work(pm_work);
+}
+
+void mali_pm_os_suspend(mali_bool os_suspend)
+{
+       int ret;
+
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+
+       /* Suspend execution of all jobs, and go to inactive state */
+       mali_executor_suspend();
+
+       if (os_suspend) {
+               mali_control_timer_suspend(MALI_TRUE);
+       }
+
+       mali_pm_exec_lock();
+
+       ret = mali_pm_common_suspend();
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+       MALI_IGNORE(ret);
+
+       mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+       mali_pm_exec_lock();
+
+#if defined(DEBUG)
+       mali_pm_state_lock();
+
+       /* Assert that things are as we left them in os_suspend(). */
+       MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+       MALI_DEBUG_ASSERT(0 == pd_mask_current);
+       MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+       mali_pm_state_unlock();
+#endif
+
+       if (MALI_TRUE == mali_pm_runtime_active) {
+               /* Runtime PM was active, so reset PMU */
+               if (NULL != pmu) {
+                       mali_pmu_reset(pmu);
+                       pmu_mask_current = mali_pmu_get_mask(pmu);
+
+                       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+               }
+
+               mali_pm_update_sync_internal();
+       }
+
+       mali_pm_exec_unlock();
+
+       /* Start executing jobs again */
+       mali_executor_resume();
+}
+
+mali_bool mali_pm_runtime_suspend(void)
+{
+       mali_bool ret;
+
+       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+
+       mali_pm_exec_lock();
+
+       /*
+        * Put SW state directly into "off" state, and do not bother to power
+        * down each power domain, because the entire GPU will be powered off
+        * when we return.
+        * For runtime PM suspend, in contrast to OS suspend, there is a race
+        * between this function and the mali_pm_update_sync_internal(), which
+        * is fine...
+        */
+       ret = mali_pm_common_suspend();
+       if (MALI_TRUE == ret) {
+               mali_pm_runtime_active = MALI_FALSE;
+       } else {
+               /*
+                * Process the "power up" instead,
+                * which could have been "lost"
+                */
+               mali_pm_update_sync_internal();
+       }
+
+       mali_pm_exec_unlock();
+
+       return ret;
+}
+
+void mali_pm_runtime_resume(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       mali_pm_exec_lock();
+
+       mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+       ++num_pm_runtime_resume;
+
+       mali_pm_state_lock();
+
+       /*
+        * Assert that things are as we left them in runtime_suspend(),
+        * except for pd_mask_wanted which normally will be the reason we
+        * got here (job queued => domains wanted)
+        */
+       MALI_DEBUG_ASSERT(0 == pd_mask_current);
+       MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+       mali_pm_state_unlock();
+#endif
+
+       if (NULL != pmu) {
+               mali_pmu_reset(pmu);
+               pmu_mask_current = mali_pmu_get_mask(pmu);
+               MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
+       }
+
+       /*
+        * Normally we are resumed because a job has just been queued.
+        * pd_mask_wanted should thus be != 0.
+        * It is however possible for others to take a Mali Runtime PM ref
+        * without having a job queued.
+        * We should however always call mali_pm_update_sync_internal(),
+        * because this will take care of any potential mismatch between
+        * pmu_mask_current and pd_mask_current.
+        */
+       mali_pm_update_sync_internal();
+
+       mali_pm_exec_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+                             char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tPower domain: id %u\n",
+                               mali_pm_domain_get_id(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tMask: 0x%04x\n",
+                               mali_pm_domain_get_mask(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tUse count: %u\n",
+                               mali_pm_domain_get_use_count(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tCurrent power state: %s\n",
+                               (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+                               "On" : "Off");
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tWanted power state: %s\n",
+                               (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+                               "On" : "Off");
+
+       return n;
+}
+#endif
+
+static void mali_pm_state_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+       _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+       _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+                                   struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+                                   u32 *num_groups_up,
+                                   struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+                                   u32 *num_l2_up)
+{
+       u32 domain_bit;
+       u32 notify_mask = power_up_mask;
+
+       MALI_DEBUG_ASSERT(0 != power_up_mask);
+       MALI_DEBUG_ASSERT_POINTER(groups_up);
+       MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+       MALI_DEBUG_ASSERT(0 == *num_groups_up);
+       MALI_DEBUG_ASSERT_POINTER(l2_up);
+       MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+       MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+       MALI_DEBUG_PRINT(5,
+                        ("PM update:      Powering up domains: . [%s]\n",
+                         mali_pm_mask_to_string(power_up_mask)));
+
+       pd_mask_current |= power_up_mask;
+
+       domain_bit = _mali_osk_fls(notify_mask);
+       while (0 != domain_bit) {
+               u32 domain_id = domain_bit - 1;
+               struct mali_pm_domain *domain =
+                       mali_pm_domain_get_from_index(
+                               domain_id);
+               struct mali_l2_cache_core *l2_cache;
+               struct mali_l2_cache_core *l2_cache_tmp;
+               struct mali_group *group;
+               struct mali_group *group_tmp;
+
+               /* Mark domain as powered up */
+               mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+               /*
+                * Make a note of the L2 and/or group(s) to notify
+                * (need to release the PM state lock before doing so)
+                */
+
+               _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+                                           l2_cache_tmp,
+                                           mali_pm_domain_get_l2_cache_list(
+                                                   domain),
+                                           struct mali_l2_cache_core,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_l2_up <
+                                         MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+                       l2_up[*num_l2_up] = l2_cache;
+                       (*num_l2_up)++;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group,
+                                           group_tmp,
+                                           mali_pm_domain_get_group_list(domain),
+                                           struct mali_group,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_groups_up <
+                                         MALI_MAX_NUMBER_OF_GROUPS);
+                       groups_up[*num_groups_up] = group;
+
+                       (*num_groups_up)++;
+               }
+
+               /* Remove current bit and find next */
+               notify_mask &= ~(1 << (domain_id));
+               domain_bit = _mali_osk_fls(notify_mask);
+       }
+}
+static void mali_pm_domain_power_down(u32 power_down_mask,
+                                     struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+                                     u32 *num_groups_down,
+                                     struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+                                     u32 *num_l2_down)
+{
+       u32 domain_bit;
+       u32 notify_mask = power_down_mask;
+
+       MALI_DEBUG_ASSERT(0 != power_down_mask);
+       MALI_DEBUG_ASSERT_POINTER(groups_down);
+       MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+       MALI_DEBUG_ASSERT(0 == *num_groups_down);
+       MALI_DEBUG_ASSERT_POINTER(l2_down);
+       MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+       MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+       MALI_DEBUG_PRINT(5,
+                        ("PM update:      Powering down domains: [%s]\n",
+                         mali_pm_mask_to_string(power_down_mask)));
+
+       pd_mask_current &= ~power_down_mask;
+
+       domain_bit = _mali_osk_fls(notify_mask);
+       while (0 != domain_bit) {
+               u32 domain_id = domain_bit - 1;
+               struct mali_pm_domain *domain =
+                       mali_pm_domain_get_from_index(domain_id);
+               struct mali_l2_cache_core *l2_cache;
+               struct mali_l2_cache_core *l2_cache_tmp;
+               struct mali_group *group;
+               struct mali_group *group_tmp;
+
+               /* Mark domain as powered down */
+               mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+               /*
+                * Make a note of the L2s and/or groups to notify
+                * (need to release the PM state lock before doing so)
+                */
+
+               _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+                                           l2_cache_tmp,
+                                           mali_pm_domain_get_l2_cache_list(domain),
+                                           struct mali_l2_cache_core,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_l2_down <
+                                         MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+                       l2_down[*num_l2_down] = l2_cache;
+                       (*num_l2_down)++;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group,
+                                           group_tmp,
+                                           mali_pm_domain_get_group_list(domain),
+                                           struct mali_group,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_groups_down <
+                                         MALI_MAX_NUMBER_OF_GROUPS);
+                       groups_down[*num_groups_down] = group;
+                       (*num_groups_down)++;
+               }
+
+               /* Remove current bit and find next */
+               notify_mask &= ~(1 << (domain_id));
+               domain_bit = _mali_osk_fls(notify_mask);
+       }
+}
+
+/*
+ * Execute pending power domain changes.
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+       /*
+        * This should only be called in non-atomic context
+        * (normally as deferred work)
+        *
+        * Look at the pending power domain changes, and execute these.
+        * Make sure group and schedulers are notified about changes.
+        */
+
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       u32 power_down_mask;
+       u32 power_up_mask;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+       ++num_pm_updates;
+#endif
+
+       /* Hold PM state lock while we look at (and obey) the wanted state */
+       mali_pm_state_lock();
+
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Wanted domain mask: .. [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_wanted)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       /* Figure out which cores we need to power on */
+       power_up_mask = pd_mask_wanted &
+                       (pd_mask_wanted ^ pd_mask_current);
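+       /*
+        * Worked example (editor's illustration): with
+        * pd_mask_wanted = 0b0110 and pd_mask_current = 0b0011, the XOR
+        * gives 0b0101; ANDing with the wanted mask yields 0b0100, so
+        * only domain 2 needs powering up.
+        */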
+
+       if (0 != power_up_mask) {
+               u32 power_up_mask_pmu;
+               struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_up = 0;
+               struct mali_l2_cache_core *
+                       l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_up = 0;
+               u32 i;
+
+#if defined(DEBUG)
+               ++num_pm_updates_up;
+#endif
+
+               /*
+                * Make sure dummy/global domain is always included when
+                * powering up, since this is controlled by runtime PM,
+                * and device power is on at this stage.
+                */
+               power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+               /* Power up only real PMU domains */
+               power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+               /* But not those that happen to be powered on already */
+               power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+                                    power_up_mask;
+
+               if (0 != power_up_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current |= power_up_mask_pmu;
+                       mali_pmu_power_up(pmu, power_up_mask_pmu);
+               }
+
+               /*
+                * Put the domains themselves in power up state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_up(power_up_mask,
+                                       groups_up, &num_groups_up,
+                                       l2_up, &num_l2_up);
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /* Notify each L2 cache that it has been powered up */
+               for (i = 0; i < num_l2_up; i++) {
+                       mali_l2_cache_power_up(l2_up[i]);
+               }
+
+               /*
+                * Tell execution module about all the groups we have
+                * powered up. Groups will be notified as a result of this.
+                */
+               mali_executor_group_power_up(groups_up, num_groups_up);
+
+               /* Lock state again before checking for power down */
+               mali_pm_state_lock();
+       }
+
+       /* Figure out which cores we need to power off */
+       power_down_mask = pd_mask_current &
+                         (pd_mask_wanted ^ pd_mask_current);
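+       /*
+        * Worked example (editor's illustration): continuing the figures
+        * above, pd_mask_current & (pd_mask_wanted ^ pd_mask_current) =
+        * 0b0011 & 0b0101 = 0b0001, so only domain 0 needs powering down.
+        */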
+
+       /*
+        * Never power down the dummy/global domain here. This is to be done
+        * from a suspend request (since this domain is only physically powered
+        * down at that point).
+        */
+       power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+       if (0 != power_down_mask) {
+               u32 power_down_mask_pmu;
+               struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_down = 0;
+               struct mali_l2_cache_core *
+                       l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_down = 0;
+               u32 i;
+
+#if defined(DEBUG)
+               ++num_pm_updates_down;
+#endif
+
+               /*
+                * Put the domains themselves in power down state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_down(power_down_mask,
+                                         groups_down, &num_groups_down,
+                                         l2_down, &num_l2_down);
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /*
+                * Tell execution module about all the groups we will be
+                * powering down. Groups will be notified as a result of this.
+                */
+               if (0 < num_groups_down) {
+                       mali_executor_group_power_down(groups_down, num_groups_down);
+               }
+
+               /* Notify each L2 cache that we will be powering down */
+               for (i = 0; i < num_l2_down; i++) {
+                       mali_l2_cache_power_down(l2_down[i]);
+               }
+
+               /*
+                * Power down only PMU domains which should not stay on.
+                * Some domains might for instance currently be incorrectly
+                * powered up if default domain power state is all on.
+                */
+               power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+               if (0 != power_down_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current &= ~power_down_mask_pmu;
+                       mali_pmu_power_down(pmu, power_down_mask_pmu);
+               }
+       } else {
+               /*
+                * Power down only PMU domains which should not stay on.
+                * Some domains might, for instance, currently be incorrectly
+                * powered up if the default domain power state is all on.
+                */
+               u32 power_down_mask_pmu;
+
+               /* No need for state lock since we'll only update PMU */
+               mali_pm_state_unlock();
+
+               power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+               if (0 != power_down_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current &= ~power_down_mask_pmu;
+                       mali_pmu_power_down(pmu, power_down_mask_pmu);
+               }
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+}
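For reference, the wanted/current split above is plain set arithmetic: XOR yields the domains whose state must change, and masking with wanted/current selects the power-up and power-down halves. A minimal standalone sketch (hypothetical harness, not part of this patch):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical state: domains 1 and 2 on; domains 2 and 3 wanted */
            unsigned int pd_mask_current = 0x06;
            unsigned int pd_mask_wanted  = 0x0C;

            /* In wanted but not in current: power up (0x08, domain 3) */
            unsigned int power_up_mask = pd_mask_wanted &
                                         (pd_mask_wanted ^ pd_mask_current);

            /* In current but not in wanted: power down (0x02, domain 1) */
            unsigned int power_down_mask = pd_mask_current &
                                           (pd_mask_wanted ^ pd_mask_current);

            printf("up=0x%02X down=0x%02X\n", power_up_mask, power_down_mask);
            return 0;
    }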
+
+static mali_bool mali_pm_common_suspend(void)
+{
+       mali_pm_state_lock();
+
+       if (0 != pd_mask_wanted) {
+               MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+               mali_pm_state_unlock();
+               return MALI_FALSE;
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_wanted)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       if (0 != pd_mask_current) {
+               /*
+                * We still have some domains powered on.
+                * It is, for instance, perfectly normal that at least the
+                * dummy/global domain is marked as powered on at this point
+                * (because it is physically powered on until this function
+                * returns).
+                */
+
+               struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_down = 0;
+               struct mali_l2_cache_core *
+                       l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_down = 0;
+               u32 i;
+
+               /*
+                * Put the domains themselves in power down state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_down(pd_mask_current,
+                                         groups_down,
+                                         &num_groups_down,
+                                         l2_down,
+                                         &num_l2_down);
+
+               MALI_DEBUG_ASSERT(0 == pd_mask_current);
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /*
+                * Tell execution module about all the groups we will be
+                * powering down. Groups will be notified as a result of this.
+                */
+               if (0 < num_groups_down) {
+                       mali_executor_group_power_down(groups_down, num_groups_down);
+               }
+
+               /* Notify each L2 cache that we will be powering down */
+               for (i = 0; i < num_l2_down; i++) {
+                       mali_l2_cache_power_down(l2_down[i]);
+               }
+
+               pmu_mask_current = 0;
+       } else {
+               MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+               mali_pm_state_unlock();
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask:  [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+       MALI_IGNORE(data);
+       mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+       int i;
+
+       /* Create all domains (including dummy domain) */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0x0 == domain_config[i]) continue;
+
+               if (NULL == mali_pm_domain_create(domain_config[i])) {
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+       MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+       /* GP core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_GP, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+       }
+
+       /* PP0 - PP3 core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP0, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP1, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP2, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP3, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1;
+               }
+       }
+
+       /* PP4 - PP7 */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP4, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP5, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP6, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP7, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+       }
+
+       /* L2gp/L2PP0/L2PP4 */
+       if (mali_is_mali400()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI400_OFFSET_L2_CACHE0, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+               }
+       } else if (mali_is_mali450()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE0, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+               }
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE1, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+               }
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE2, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+               }
+       } else if (mali_is_mali470()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI470_OFFSET_L2_CACHE1, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0;
+               }
+       }
+}
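To make the defaults above concrete, a hypothetical Mali-450 MP4 (one GP, PP0-PP3, L2 caches 0 and 1 present) would come out with the layout below; this is a sketch of the resulting array, not code from the patch:

    /* Resulting domain_config[] for a hypothetical Mali-450 MP4 */
    static const u32 mali450_mp4_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
            [MALI_DOMAIN_INDEX_GP]  = 0x01,      /* bit 0, shared with L20 */
            [MALI_DOMAIN_INDEX_PP0] = 0x01 << 1, /* bit 1, shared with L21 */
            [MALI_DOMAIN_INDEX_PP1] = 0x01 << 2, /* PP1-PP3 share bit 2 */
            [MALI_DOMAIN_INDEX_PP2] = 0x01 << 2,
            [MALI_DOMAIN_INDEX_PP3] = 0x01 << 2,
            [MALI_DOMAIN_INDEX_L20] = 0x01,      /* GP L2 */
            [MALI_DOMAIN_INDEX_L21] = 0x01 << 1, /* PP L2 */
    };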
+
+static u32 mali_pm_get_registered_cores_mask(void)
+{
+       int i = 0;
+       u32 mask = 0;
+
+       for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+               mask |= domain_config[i];
+       }
+
+       return mask;
+}
+
+static void mali_pm_set_pmu_domain_config(void)
+{
+       int i = 0;
+
+       _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+               if (0 != domain_config[i]) {
+                       MALI_DEBUG_PRINT(2, ("Using customer-provided PMU config:\n"));
+                       break;
+               }
+       }
+
+       if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+               MALI_DEBUG_PRINT(2, ("Using HW-detected PMU config:\n"));
+               mali_pm_set_default_pm_domain_config();
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+               if (domain_config[i]) {
+                       MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x\n", i, domain_config[i]));
+               }
+       }
+       /* Can't override dummy domain mask */
+       domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+               1 << MALI_DOMAIN_INDEX_DUMMY;
+}
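Platforms can skip the HW detection entirely by supplying their own masks, which _mali_osk_device_data_pmu_config_get() reads from the platform data. A hedged sketch of a board file, assuming the usual mali_gpu_device_data fields (pmu_domain_config, pmu_switch_delay) are available; the values here are illustrative:

    static struct mali_gpu_device_data mali_gpu_data = {
            /* Custom layout: GP+L20 in domain 0, PP0+L21 in domain 1 */
            .pmu_domain_config = {
                    [MALI_DOMAIN_INDEX_GP]  = 0x1,
                    [MALI_DOMAIN_INDEX_PP0] = 0x2,
                    [MALI_DOMAIN_INDEX_L20] = 0x1,
                    [MALI_DOMAIN_INDEX_L21] = 0x2,
            },
            .pmu_switch_delay = 0xFF, /* illustrative delay value */
    };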
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask)
+{
+       static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+       int bit;
+       int str_pos = 0;
+
+       /* Must be protected by lock since we use shared string buffer */
+       if (NULL != pm_lock_exec) {
+               MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       }
+
+       for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+               if (mask & (1 << bit)) {
+                       bit_str[str_pos] = 'X';
+               } else {
+                       bit_str[str_pos] = '-';
+               }
+               str_pos++;
+       }
+
+       bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+       return bit_str;
+}
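For example, with MALI_MAX_NUMBER_OF_DOMAINS == 13 the string renders the most significant bit first (and the caller must hold pm_lock_exec, as the assert above demands); a sketch:

    mali_pm_mask_to_string(0x0001);                    /* "------------X" */
    mali_pm_mask_to_string(MALI_PM_DOMAIN_DUMMY_MASK); /* "X------------" */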
+
+const char *mali_pm_group_stats_to_string(void)
+{
+       static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+       u32 num_groups = mali_group_get_glob_num_groups();
+       u32 i;
+
+       /* Must be protected by lock since we use shared string buffer */
+       if (NULL != pm_lock_exec) {
+               MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       }
+
+       for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+               struct mali_group *group;
+
+               group = mali_group_get_glob_group(i);
+
+               if (MALI_TRUE == mali_group_power_is_on(group)) {
+                       bit_str[i] = 'X';
+               } else {
+                       bit_str[i] = '-';
+               }
+       }
+
+       bit_str[i] = '\0';
+
+       return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask;
+ * cost is the total power cost of the cores which will be powered on given
+ * this mask.
+ */
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+       u32 i;
+
+       /* loop through all cores */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (!(domain_config[i] & mask)) {
+                       continue;
+               }
+
+               switch (i) {
+               case MALI_DOMAIN_INDEX_GP:
+                       *cost += MALI_GP_COST;
+
+                       break;
+               case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP3:
+                       if (mali_is_mali400()) {
+                               if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+                                   || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                                       == domain_config[MALI_DOMAIN_INDEX_L20])) {
+                                       *num_pp += 1;
+                               }
+                       } else {
+                               if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+                                   || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                                       == domain_config[MALI_DOMAIN_INDEX_L21])) {
+                                       *num_pp += 1;
+                               }
+                       }
+
+                       *cost += MALI_PP_COST;
+                       break;
+               case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP7:
+                       MALI_DEBUG_ASSERT(mali_is_mali450());
+
+                       if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+                           || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                               == domain_config[MALI_DOMAIN_INDEX_L22])) {
+                               *num_pp += 1;
+                       }
+
+                       *cost += MALI_PP_COST;
+                       break;
+               case MALI_DOMAIN_INDEX_L20: /* Fall through */
+               case MALI_DOMAIN_INDEX_L21: /* Fall through */
+               case MALI_DOMAIN_INDEX_L22:
+                       *cost += MALI_L2_COST;
+
+                       break;
+               }
+       }
+}
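A worked example, assuming a Mali-400 with the hard-coded defaults from mali_pm_set_default_pm_domain_config() (GP = 0x01, PP0 = 0x04, L20 = 0x02) and whatever values the MALI_*_COST constants carry:

    u32 num_pp = 0, cost = 0;

    /* Mask 0x07 covers GP, L20 and PP0; PP0 counts as usable because
     * its L2 (L20, mask 0x02) is also in the mask. */
    mali_pm_stat_from_mask(0x07, &num_pp, &cost);
    /* num_pp == 1, cost == MALI_GP_COST + MALI_PP_COST + MALI_L2_COST */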
+
+void mali_pm_power_cost_setup(void)
+{
+       /*
+        * Two parallel arrays which store the best domain mask and its cost.
+        * The index is the number of PP cores minus one; e.g. index 0 is the
+        * one-PP option, which might have mask 0x2 with a cost of 1.
+        * Lower cost is better.
+        */
+       u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+       u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+       /* cores_in_domain stores the total number of PP cores in each PM domain. */
+       u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       /* max_domain_mask/max_domain_id track the highest PM domain in use. */
+       u32 max_domain_mask = 0;
+       u32 max_domain_id = 0;
+       u32 always_on_pp_cores = 0;
+
+       u32 num_pp, cost, mask;
+       u32 i, j, k;
+
+       /* Initialize statistics */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+               best_mask[i] = 0;
+               best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+               for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+                       mali_pm_domain_power_cost_result[i][j] = 0;
+               }
+       }
+
+       /* Calculate the number of PP cores for the given domain config. */
+       for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+               if (0 < domain_config[i]) {
+                       /* Get the max domain mask value used to calculate power
+                        * cost; always-on PP cores are not counted. */
+                       if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+                           && max_domain_mask < domain_config[i]) {
+                               max_domain_mask = domain_config[i];
+                       }
+
+                       if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+                               always_on_pp_cores++;
+                       }
+               }
+       }
+       max_domain_id = _mali_osk_fls(max_domain_mask);
+
+       /*
+        * Try all combinations of power domains and check how many PP cores
+        * they have and their power cost.
+        */
+       for (mask = 0; mask < (1 << max_domain_id); mask++) {
+               num_pp = 0;
+               cost = 0;
+
+               mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+               /* This mask is usable for any MP count from 1 up to num_pp PP cores; update the statistics for all of them */
+               for (i = 0; i < num_pp; i++) {
+                       if (best_cost[i] >= cost) {
+                               best_cost[i] = cost;
+                               best_mask[i] = mask;
+                       }
+               }
+       }
+
+       /*
+        * If we want to enable x PP cores and x is less than the number of
+        * always-on PP cores, then all of the PP cores we enable must be
+        * always-on cores.
+        */
+       for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+               if (i < always_on_pp_cores) {
+                       mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+                               = i + 1;
+               } else {
+                       mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+                               = always_on_pp_cores;
+               }
+       }
+
+       /* In this loop, i represents the number of non-always-on PP cores we want to enable. */
+       for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+               if (best_mask[i] == 0) {
+                       /* This MP variant is not available */
+                       continue;
+               }
+
+               for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+                       cores_in_domain[j] = 0;
+               }
+
+               for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+                       if (0 < domain_config[j]
+                           && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) {
+                               cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+                       }
+               }
+
+               /* In this loop, j counts how many cores we have already assigned. */
+               for (j = 0; j <= i;) {
+                       /* k visits each domain to check the number of PP cores remaining in it. */
+                       for (k = 0; k < max_domain_id; k++) {
+                               /* If domain k is enabled in best_mask[i] and still has
+                                * spare PP cores, pick at least one PP core from it,
+                                * then move on to the next enabled PM domain. */
+                               if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+                                       cores_in_domain[k]--;
+                                       mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+                                       j++;
+                                       if (j > i) {
+                                               break;
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+/*
+ * When we are doing core scaling,
+ * this function is called to return the best mask to
+ * achieve the best pp group power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+       MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+       _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
+}
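The table filled in by mali_pm_power_cost_setup() is keyed by requested core count: row n, column k says how many PP cores to take from domain k when n cores are wanted, with the last column reserved for always-on cores. A hedged sketch of a core-scaling caller, continuing the hypothetical Mali-450 MP4 from above (no always-on cores):

    int dst[MALI_MAX_NUMBER_OF_DOMAINS];

    /* Ask for the cheapest way to run 3 PP cores. On the hypothetical
     * MP4 above this yields dst[1] == 1 (one core from PP0's domain)
     * and dst[2] == 2 (two cores from the PP1-PP3 domain). */
    mali_pm_get_best_power_cost_mask(3, dst);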
+
+u32 mali_pm_get_current_mask(void)
+{
+       return pd_mask_current;
+}
+
+u32 mali_pm_get_wanted_mask(void)
+{
+       return pd_mask_wanted;
+}
diff --git a/utgard/r8p0/common/mali_pm.h b/utgard/r8p0/common/mali_pm.h
new file mode 100755 (executable)
index 0000000..3c11977
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+
+#define MALI_DOMAIN_INDEX_GP        0
+#define MALI_DOMAIN_INDEX_PP0       1
+#define MALI_DOMAIN_INDEX_PP1       2
+#define MALI_DOMAIN_INDEX_PP2       3
+#define MALI_DOMAIN_INDEX_PP3       4
+#define MALI_DOMAIN_INDEX_PP4       5
+#define MALI_DOMAIN_INDEX_PP5       6
+#define MALI_DOMAIN_INDEX_PP6       7
+#define MALI_DOMAIN_INDEX_PP7       8
+#define MALI_DOMAIN_INDEX_L20       9
+#define MALI_DOMAIN_INDEX_L21      10
+#define MALI_DOMAIN_INDEX_L22      11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU or always on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY    12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * PM module covers Mali PM core, PM domains and Mali PMU
+ */
+_mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
+void mali_pm_terminate(void);
+
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+               struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+               struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+                                 struct mali_group **groups,
+                                 u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+                                 u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
+void mali_pm_os_resume(void);
+
+mali_bool mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+                             char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
+
+u32 mali_pm_get_current_mask(void);
+u32 mali_pm_get_wanted_mask(void);
+#endif /* __MALI_PM_H__ */
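A rough sketch of how a kernel integration might drive this API over the driver lifetime, using only the functions declared above; the ordering is illustrative, not normative:

    static int example_driver_init(void)
    {
            if (_MALI_OSK_ERR_OK != mali_pm_initialize())
                    return -1;

            /* ... register groups/L2s via mali_pm_register_group() and
             * mali_pm_register_l2_cache() while probing resources ... */

            mali_pm_init_begin();
            /* ... probe the hardware while everything is powered ... */
            mali_pm_init_end();

            mali_pm_update_async(); /* schedule a PM state re-evaluation */
            return 0;
    }

    static void example_driver_exit(void)
    {
            mali_pm_terminate();
    }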
diff --git a/utgard/r8p0/common/mali_pm_domain.c b/utgard/r8p0/common/mali_pm_domain.c
new file mode 100755 (executable)
index 0000000..9db8488
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
+
+void mali_pm_domain_initialize(void)
+{
+       /* Domains will be initialized/created on demand */
+}
+
+void mali_pm_domain_terminate(void)
+{
+       int i;
+
+       /* Delete all domains that have been created */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               mali_pm_domain_delete(mali_pm_domains[i]);
+               mali_pm_domains[i] = NULL;
+       }
+}
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+       struct mali_pm_domain *domain = NULL;
+       u32 domain_id = 0;
+
+       domain = mali_pm_domain_get_from_mask(pmu_mask);
+       if (NULL != domain) return domain;
+
+       MALI_DEBUG_PRINT(2,
+                        ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+                         pmu_mask));
+
+       domain = (struct mali_pm_domain *)_mali_osk_malloc(
+                        sizeof(struct mali_pm_domain));
+       if (NULL != domain) {
+               domain->power_is_on = MALI_FALSE;
+               domain->pmu_mask = pmu_mask;
+               domain->use_count = 0;
+               _mali_osk_list_init(&domain->group_list);
+               _mali_osk_list_init(&domain->l2_cache_list);
+
+               domain_id = _mali_osk_fls(pmu_mask) - 1;
+               /* Verify the domain_id */
+               MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+               /* Verify that only one bit is set in pmu_mask */
+               MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+               mali_pm_domains[domain_id] = domain;
+
+               return domain;
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+       }
+
+       return NULL;
+}
+
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+       if (NULL == domain) {
+               return;
+       }
+
+       _mali_osk_list_delinit(&domain->group_list);
+       _mali_osk_list_delinit(&domain->l2_cache_list);
+
+       _mali_osk_free(domain);
+}
+
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+                             struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       /*
+        * Use addtail because the virtual group is created last and needs
+        * to be at the end of the list (in order to be activated after
+        * all children).
+        */
+       _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
+}
+
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+                                struct mali_l2_cache_core *l2_cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       MALI_DEBUG_ASSERT_POINTER(l2_cache);
+       _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+       u32 id = 0;
+
+       if (0 == mask) {
+               return NULL;
+       }
+
+       id = _mali_osk_fls(mask) - 1;
+
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+       /* Verify that only one bit is set in mask */
+       MALI_DEBUG_ASSERT((1 << id) == mask);
+
+       return mali_pm_domains[id];
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
+{
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+
+       return mali_pm_domains[id];
+}
+
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+
+       if (0 == domain->use_count) {
+               _mali_osk_pm_dev_ref_get_async();
+       }
+
+       ++domain->use_count;
+       MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
+
+       /* Return our mask so caller can check this against wanted mask */
+       return domain->pmu_mask;
+}
+
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+
+       --domain->use_count;
+       MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
+
+       if (0 == domain->use_count) {
+               _mali_osk_pm_dev_ref_put();
+       }
+
+       /*
+        * Return the PMU mask which could now be powered down
+        * (the bit for this domain).
+        * Acting on it is the responsibility of the caller (mali_pm).
+        */
+       return (0 == domain->use_count ? domain->pmu_mask : 0);
+}
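The get/put pair is how a domain stays pinned while one of its groups is in use; a minimal sketch of the intended pairing (PM state lock held by the caller, per the locking note in mali_pm_domain.h):

    u32 wanted, droppable;

    wanted = mali_pm_domain_ref_get(domain); /* first ref requests power */

    /* ... schedule work on a group belonging to this domain ... */

    droppable = mali_pm_domain_ref_put(domain);
    if (0 != droppable) {
            /* Last reference gone: mali_pm may now power down this bit. */
    }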
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
+{
+       u32 id = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
+
+       id = _mali_osk_fls(domain->pmu_mask) - 1;
+
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+       /* Verify that only one bit is set in pmu_mask */
+       MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+       /* Verify that we have stored the domain at right id/index */
+       MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
+
+       return id;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
+{
+       int i;
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (NULL == mali_pm_domains[i]) {
+                       /* Nothing to check */
+                       continue;
+               }
+
+               if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+                       /* Not ready for suspend! */
+                       return MALI_FALSE;
+               }
+
+               if (0 != mali_pm_domains[i]->use_count) {
+                       /* Not ready for suspend! */
+                       return MALI_FALSE;
+               }
+       }
+
+       return MALI_TRUE;
+}
+#endif
diff --git a/utgard/r8p0/common/mali_pm_domain.h b/utgard/r8p0/common/mali_pm_domain.h
new file mode 100755 (executable)
index 0000000..2ac8c0d
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+/* Instances are protected by PM state lock */
+struct mali_pm_domain {
+       mali_bool power_is_on;
+       s32 use_count;
+       u32 pmu_mask;
+
+       /* Zero or more groups can belong to this domain */
+       _mali_osk_list_t group_list;
+
+       /* Zero or more L2 caches can belong to this domain */
+       _mali_osk_list_t l2_cache_list;
+};
+
+
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_add_l2_cache(
+       struct mali_pm_domain *domain,
+       struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+                             struct mali_group *group);
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
+
+/* Ref counting */
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->power_is_on;
+}
+
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+       struct mali_pm_domain *domain,
+       mali_bool power_is_on)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       domain->power_is_on = power_is_on;
+}
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->use_count;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->pmu_mask;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/utgard/r8p0/common/mali_pm_metrics.c b/utgard/r8p0/common/mali_pm_metrics.c
new file mode 100644 (file)
index 0000000..981ec81
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "mali_pm_metrics.h"
+#include "mali_osk_locks.h"
+#include "mali_osk_mali.h"
+#include <linux/ktime.h>
+
+#define MALI_PM_TIME_SHIFT 0
+#define MALI_UTILIZATION_MAX_PERIOD 80000000 /* ns = 80ms */
+
+_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev)
+{
+       int i = 0;
+
+       MALI_DEBUG_ASSERT(mdev != NULL);
+
+       mdev->mali_metrics.time_period_start = ktime_get();
+       mdev->mali_metrics.time_period_start_gp = mdev->mali_metrics.time_period_start;
+       mdev->mali_metrics.time_period_start_pp = mdev->mali_metrics.time_period_start;
+
+       mdev->mali_metrics.time_busy = 0;
+       mdev->mali_metrics.time_idle = 0;
+       mdev->mali_metrics.prev_busy = 0;
+       mdev->mali_metrics.prev_idle = 0;
+       mdev->mali_metrics.num_running_gp_cores = 0;
+       mdev->mali_metrics.num_running_pp_cores = 0;
+       mdev->mali_metrics.time_busy_gp = 0;
+       mdev->mali_metrics.time_idle_gp = 0;
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+               mdev->mali_metrics.time_busy_pp[i] = 0;
+               mdev->mali_metrics.time_idle_pp[i] = 0;
+       }
+       mdev->mali_metrics.gpu_active = MALI_FALSE;
+
+       mdev->mali_metrics.lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+       if (NULL == mdev->mali_metrics.lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_metrics_term(struct mali_device *mdev)
+{
+       _mali_osk_spinlock_irq_term(mdev->mali_metrics.lock);
+}
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+void mali_pm_record_job_status(struct mali_device *mdev)
+{
+       ktime_t now;
+       ktime_t diff;
+       u64 ns_time;
+
+       MALI_DEBUG_ASSERT(mdev != NULL);
+
+       now = ktime_get();
+       diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+
+       ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+       mdev->mali_metrics.time_busy += ns_time;
+       mdev->mali_metrics.time_period_start = now;
+}
+
+void mali_pm_record_gpu_idle(mali_bool is_gp)
+{
+       ktime_t now;
+       ktime_t diff;
+       u64 ns_time;
+       struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+
+       MALI_DEBUG_ASSERT(mdev != NULL);
+
+       _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+       now = ktime_get();
+
+       if (MALI_TRUE == is_gp) {
+               --mdev->mali_metrics.num_running_gp_cores;
+               if (0 == mdev->mali_metrics.num_running_gp_cores) {
+                       diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp);
+                       ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                       mdev->mali_metrics.time_busy_gp += ns_time;
+                       mdev->mali_metrics.time_period_start_gp = now;
+
+                       if (0 == mdev->mali_metrics.num_running_pp_cores) {
+                               MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+                               diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+                               ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                               mdev->mali_metrics.time_busy += ns_time;
+                               mdev->mali_metrics.time_period_start = now;
+                               mdev->mali_metrics.gpu_active = MALI_FALSE;
+                       }
+               }
+       } else {
+               --mdev->mali_metrics.num_running_pp_cores;
+               if (0 == mdev->mali_metrics.num_running_pp_cores) {
+                       diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp);
+                       ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                       mdev->mali_metrics.time_busy_pp[0] += ns_time;
+                       mdev->mali_metrics.time_period_start_pp = now;
+
+                       if (0 == mdev->mali_metrics.num_running_gp_cores) {
+                               MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+                               diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+                               ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                               mdev->mali_metrics.time_busy += ns_time;
+                               mdev->mali_metrics.time_period_start = now;
+                               mdev->mali_metrics.gpu_active = MALI_FALSE;
+                       }
+               }
+       }
+
+       _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_record_gpu_active(mali_bool is_gp)
+{
+       ktime_t now;
+       ktime_t diff;
+       struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+
+       MALI_DEBUG_ASSERT(mdev != NULL);
+
+       _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+       now = ktime_get();
+
+       if (MALI_TRUE == is_gp) {
+               mdev->mali_metrics.num_running_gp_cores++;
+               if (1 == mdev->mali_metrics.num_running_gp_cores) {
+                       diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp);
+                       mdev->mali_metrics.time_idle_gp += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                       mdev->mali_metrics.time_period_start_gp = now;
+                       if (0 == mdev->mali_metrics.num_running_pp_cores) {
+                               MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE);
+                               diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+                               mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                               mdev->mali_metrics.time_period_start = now;
+                               mdev->mali_metrics.gpu_active = MALI_TRUE;
+                       }
+               } else {
+                       MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+               }
+       } else {
+               mdev->mali_metrics.num_running_pp_cores++;
+               if (1 == mdev->mali_metrics.num_running_pp_cores) {
+                       diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp);
+                       mdev->mali_metrics.time_idle_pp[0] += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                       mdev->mali_metrics.time_period_start_pp = now;
+                       if (0 == mdev->mali_metrics.num_running_gp_cores) {
+                               MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE);
+                               diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+                               mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+                               mdev->mali_metrics.time_period_start = now;
+                               mdev->mali_metrics.gpu_active = MALI_TRUE;
+                       }
+               } else {
+                       MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE);
+               }
+       }
+
+       _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
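Each job is expected to be bracketed by an active/idle pair so the busy counters stay balanced; a hedged sketch for a PP job:

    mali_pm_record_gpu_active(MALI_FALSE); /* PP job starts (is_gp == FALSE) */
    /* ... job executes on the GPU ... */
    mali_pm_record_gpu_idle(MALI_FALSE);   /* PP job finishes */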
+
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+static void mali_pm_get_dvfs_utilisation_calc(struct mali_device *mdev, ktime_t now)
+{
+       ktime_t diff;
+
+       MALI_DEBUG_ASSERT(mdev != NULL);
+
+       diff = ktime_sub(now, mdev->mali_metrics.time_period_start);
+
+       if (mdev->mali_metrics.gpu_active) {
+               mdev->mali_metrics.time_busy += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+       } else {
+               mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT);
+       }
+}
+
+/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */
+static void mali_pm_reset_dvfs_utilisation_unlocked(struct mali_device *mdev, ktime_t now)
+{
+       /* Store previous value */
+       mdev->mali_metrics.prev_idle = mdev->mali_metrics.time_idle;
+       mdev->mali_metrics.prev_busy = mdev->mali_metrics.time_busy;
+
+       /* Reset current values */
+       mdev->mali_metrics.time_period_start = now;
+       mdev->mali_metrics.time_period_start_gp = now;
+       mdev->mali_metrics.time_period_start_pp = now;
+       mdev->mali_metrics.time_idle = 0;
+       mdev->mali_metrics.time_busy = 0;
+
+       mdev->mali_metrics.time_busy_gp = 0;
+       mdev->mali_metrics.time_idle_gp = 0;
+       mdev->mali_metrics.time_busy_pp[0] = 0;
+       mdev->mali_metrics.time_idle_pp[0] = 0;
+}
+
+void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev)
+{
+       _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+       mali_pm_reset_dvfs_utilisation_unlocked(mdev, ktime_get());
+       _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_get_dvfs_utilisation(struct mali_device *mdev,
+                                 unsigned long *total_out, unsigned long *busy_out)
+{
+       ktime_t now = ktime_get();
+       u64 busy = 0;
+       u64 total = 0;
+
+       _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+
+       mali_pm_get_dvfs_utilisation_calc(mdev, now);
+
+       busy = mdev->mali_metrics.time_busy;
+       total = busy + mdev->mali_metrics.time_idle;
+
+       /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD
+        * (default 80ms) */
+       if (total >= MALI_UTILIZATION_MAX_PERIOD) {
+               mali_pm_reset_dvfs_utilisation_unlocked(mdev, now);
+       } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
+               total += mdev->mali_metrics.prev_idle +
+                        mdev->mali_metrics.prev_busy;
+               busy += mdev->mali_metrics.prev_busy;
+       }
+
+       *total_out = (unsigned long)total;
+       *busy_out = (unsigned long)busy;
+       _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
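A DVFS governor would reduce the two counters to a load figure; a minimal sketch, assuming a plain percentage is wanted and mdev is the usual struct mali_device pointer (the 64-bit cast keeps the multiply from overflowing on 32-bit builds):

    unsigned long total = 0, busy = 0, load_pct = 0;

    mali_pm_get_dvfs_utilisation(mdev, &total, &busy);
    if (0 < total)
            load_pct = (unsigned long)(((u64)busy * 100) / total);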
+
+void mali_pm_metrics_spin_lock(void)
+{
+       struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+       _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock);
+}
+
+void mali_pm_metrics_spin_unlock(void)
+{
+       struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev);
+       _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock);
+}
diff --git a/utgard/r8p0/common/mali_pm_metrics.h b/utgard/r8p0/common/mali_pm_metrics.h
new file mode 100644 (file)
index 0000000..256f448
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_METRICS_H__
+#define __MALI_PM_METRICS_H__
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include "mali_osk_locks.h"
+#include "mali_group.h"
+
+struct mali_device;
+
+/**
+ * Metrics data collected for use by the power management framework.
+ */
+struct mali_pm_metrics_data {
+       ktime_t time_period_start;
+       u64 time_busy;
+       u64 time_idle;
+       u64 prev_busy;
+       u64 prev_idle;
+       u32 num_running_gp_cores;
+       u32 num_running_pp_cores;
+       ktime_t time_period_start_gp;
+       u64 time_busy_gp;
+       u64 time_idle_gp;
+       ktime_t time_period_start_pp;
+       u64 time_busy_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       u64 time_idle_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       mali_bool gpu_active;
+       _mali_osk_spinlock_irq_t *lock;
+};
+
+/**
+ * Initialize/start the Mali GPU PM metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev);
+
+/**
+ * Terminate the Mali GPU PM metrics reporting.
+ */
+void mali_pm_metrics_term(struct mali_device *mdev);
+
+/**
+ * Should be called when a GPU job is about to start executing.
+ */
+void mali_pm_record_gpu_active(mali_bool is_gp);
+
+/**
+ * Should be called when a GPU job has finished.
+ */
+void mali_pm_record_gpu_idle(mali_bool is_gp);
+
+void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev);
+
+void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, unsigned long *total_out, unsigned long *busy_out);
+
+void mali_pm_metrics_spin_lock(void);
+
+void mali_pm_metrics_spin_unlock(void);
+#else
+static inline void mali_pm_record_gpu_idle(mali_bool is_gp) {}
+static inline void mali_pm_record_gpu_active(mali_bool is_gp) {}
+#endif
+#endif /* __MALI_PM_METRICS_H__ */
diff --git a/utgard/r8p0/common/mali_pmu.c b/utgard/r8p0/common/mali_pmu.c
new file mode 100755 (executable)
index 0000000..bf0d413
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+       struct mali_pmu_core *pmu);
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
+{
+       struct mali_pmu_core *pmu;
+
+       MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+       MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+       pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+                     sizeof(struct mali_pmu_core));
+       if (NULL != pmu) {
+               pmu->registered_cores_mask = 0; /* to be set later */
+
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+                               resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+                       pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+                       mali_global_pmu_core = pmu;
+
+                       return pmu;
+               }
+               _mali_osk_free(pmu);
+       }
+
+       return NULL;
+}
+
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
+       MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+       mali_global_pmu_core = NULL;
+
+       mali_hw_core_delete(&pmu->hw_core);
+       _mali_osk_free(pmu);
+}
+
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
+{
+       pmu->registered_cores_mask = mask;
+}
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       /* Setup the desired defaults */
+       mali_hw_core_register_write_relaxed(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       mali_hw_core_register_write_relaxed(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+}
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+       u32 stat;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       mali_pm_exec_lock();
+
+       mali_pmu_reset(pmu);
+
+       /* Now simply power up the domains which are marked as powered down */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       mali_pmu_power_up(pmu, stat);
+
+       mali_pm_exec_unlock();
+}
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+       u32 stat;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       mali_pm_exec_lock();
+
+       /* Now simply power down the domains which are marked as powered up */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
+
+       mali_pm_exec_unlock();
+}
+
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+       u32 stat;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+       MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+                               PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+                               PMU_REG_VAL_IRQ));
+
+       MALI_DEBUG_PRINT(3,
+                        ("PMU power down: ...................... [%s]\n",
+                         mali_pm_mask_to_string(mask)));
+
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+
+       /*
+        * Assert that we are not powering down domains which are already
+        * powered down.
+        */
+       MALI_DEBUG_ASSERT(0 == (stat & mask));
+
+       mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+
+       if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
+
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+       /*
+        * Do not wait for interrupt on Mali-300/400 if all domains are
+        * powered off by our power down command, because the HW will simply
+        * not generate an interrupt in this case.
+        */
+       if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) {
+               err = mali_pmu_wait_for_command_finish(pmu);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return err;
+               }
+       } else {
+               mali_hw_core_register_write(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+       }
+
+#if defined(DEBUG)
+       /* Verify power status of domains after power down */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
+
+       return _MALI_OSK_ERR_OK;
+}
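To make the Mali-300/400 special case concrete: suppose registered_cores_mask == 0x3, domain 0 is already off (stat == 0x1, since a set STATUS bit means powered down) and we now power down domain 1 (mask == 0x2). Then (mask | stat) == 0x3 == registered_cores_mask, every domain ends up off, the PMU raises no IRQ, and the code above skips the wait and just clears the IRQ bit. Sketch of the decisive check, with hypothetical values:

    u32 registered = 0x3, stat = 0x1, mask = 0x2;

    if (registered != (mask | stat)) {
            /* Some domain stays on: an IRQ will arrive, wait for it */
    } else {
            /* All domains off: no IRQ on Mali-300/400, skip the wait */
    }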
+
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+       u32 stat;
+       _mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       u32 current_domain;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+       MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+                               PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+                               PMU_REG_VAL_IRQ));
+
+       MALI_DEBUG_PRINT(3,
+                        ("PMU power up: ........................ [%s]\n",
+                         mali_pm_mask_to_string(mask)));
+
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+
+       mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+       if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+
+       /*
+        * Assert that we are only powering up domains which are currently
+        * powered down.
+        */
+       MALI_DEBUG_ASSERT(mask == (stat & mask));
+
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+       err = mali_pmu_wait_for_command_finish(pmu);
+       if (_MALI_OSK_ERR_OK != err) {
+               return err;
+       }
+#else
+       for (current_domain = 1;
+            current_domain <= pmu->registered_cores_mask;
+            current_domain <<= 1) {
+               if (current_domain & mask & stat) {
+                       mali_hw_core_register_write(&pmu->hw_core,
+                                                   PMU_REG_ADDR_MGMT_POWER_UP,
+                                                   current_domain);
+
+                       err = mali_pmu_wait_for_command_finish(pmu);
+                       if (_MALI_OSK_ERR_OK != err) {
+                               return err;
+                       }
+               }
+       }
+#endif
+
+#if defined(DEBUG)
+       /* Verify power status of domains after power up */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       MALI_DEBUG_ASSERT(0 == (stat & mask));
+#endif /* defined(DEBUG) */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+       struct mali_pmu_core *pmu)
+{
+       u32 rawstat;
+       u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+       MALI_DEBUG_ASSERT(pmu);
+
+       /* Wait for the command to complete */
+       do {
+               rawstat = mali_hw_core_register_read(&pmu->hw_core,
+                                                    PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+               --timeout;
+       } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+       MALI_DEBUG_ASSERT(0 < timeout);
+
+       if (0 == timeout) {
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+       return _MALI_OSK_ERR_OK;
+}
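+
+#if 0
+/* Illustrative sketch (not built): the function above is an instance of
+ * the poll-with-budget pattern used throughout this driver -- read a raw
+ * status register until an expected bit appears or a retry budget runs
+ * out. poll_for_bit() and its read_reg callback are placeholders, not
+ * driver symbols. */
+static int poll_for_bit(u32 (*read_reg)(void), u32 bit, u32 budget)
+{
+       u32 val;
+
+       do {
+               val = read_reg();
+               --budget;
+       } while (0 == (val & bit) && 0 < budget);
+
+       return (0 == budget) ? -1 /* timed out */ : 0;
+}
+#endif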
diff --git a/utgard/r8p0/common/mali_pmu.h b/utgard/r8p0/common/mali_pmu.h
new file mode 100755 (executable)
index 0000000..36ff58e
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * PMU power management functions for the Mali driver
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
+
+/** @brief Mali in-built PMU hardware info; the PMU hardware tracks the power mask of the registered cores
+ */
+struct mali_pmu_core {
+       struct mali_hw_core hw_core;
+       u32 registered_cores_mask;
+       u32 switch_delay;
+};
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+       PMU_REG_ADDR_MGMT_POWER_UP                  = 0x00,     /**< Power up register */
+       PMU_REG_ADDR_MGMT_POWER_DOWN                = 0x04,     /**< Power down register */
+       PMU_REG_ADDR_MGMT_STATUS                    = 0x08,     /**< Core sleep status register */
+       PMU_REG_ADDR_MGMT_INT_MASK                  = 0x0C,     /**< Interrupt mask register */
+       PMU_REG_ADDR_MGMT_INT_RAWSTAT               = 0x10,     /**< Interrupt raw status register */
+       PMU_REG_ADDR_MGMT_INT_CLEAR                 = 0x18,     /**< Interrupt clear register */
+       PMU_REG_ADDR_MGMT_SW_DELAY                  = 0x1C,     /**< Switch delay register */
+       PMU_REGISTER_ADDRESS_SPACE_SIZE             = 0x28,     /**< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+#define PMU_REG_VAL_IRQ 1
+
+extern struct mali_pmu_core *mali_global_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from the entry point of the driver in order to create and initialize the PMU resource
+ *
+ * @param resource Pointer to the PMU resource
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief Deallocate the PMU resource
+ *
+ * This is called on driver exit to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Set registered cores mask
+ *
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
+ */
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+       return mali_global_pmu_core;
+}
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
+ *
+ * @param pmu Pointer to PMU core object
+ */
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+       u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       return ((~stat) & pmu->registered_cores_mask);
+}
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success; otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success; otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
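+#if 0
+/* Illustrative sketch (not built): STATUS has a bit set for every domain
+ * that is powered *down*, so mali_pmu_get_mask() inverts it against
+ * registered_cores_mask to report what is currently up. A hypothetical
+ * suspend/resume cycle, error handling elided: */
+static void example_pmu_cycle(struct mali_pmu_core *pmu)
+{
+       u32 up_mask = mali_pmu_get_mask(pmu); /* domains currently powered up */
+
+       if (_MALI_OSK_ERR_OK == mali_pmu_power_down(pmu, up_mask)) {
+               (void)mali_pmu_power_up(pmu, up_mask);
+       }
+}
+#endif
+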
+#endif /* __MALI_PMU_H__ */
diff --git a/utgard/r8p0/common/mali_pp.c b/utgard/r8p0/common/mali_pp.c
new file mode 100755 (executable)
index 0000000..e1cd470
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+#include <mali_platform.h>
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+       struct mali_pp_core *core = NULL;
+
+       MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+       if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+               MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+               return NULL;
+       }
+
+       core = _mali_osk_calloc(1, sizeof(struct mali_pp_core));
+       if (NULL != core) {
+               core->core_id = mali_global_num_pp_cores;
+               core->bcast_id = bcast_id;
+
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+                       _mali_osk_errcode_t ret;
+
+                       if (!is_virtual) {
+                               ret = mali_pp_reset(core);
+                       } else {
+                               ret = _MALI_OSK_ERR_OK;
+                       }
+
+                       if (_MALI_OSK_ERR_OK == ret) {
+                               ret = mali_group_add_pp_core(group, core);
+                               if (_MALI_OSK_ERR_OK == ret) {
+                                       /* Setup IRQ handlers (which will do IRQ probing if needed) */
+                                       MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+                                       core->irq = _mali_osk_irq_init(resource->irq,
+                                                                      mali_group_upper_half_pp,
+                                                                      group,
+                                                                      mali_pp_irq_probe_trigger,
+                                                                      mali_pp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
+                                       if (NULL != core->irq) {
+                                               mali_global_pp_cores[mali_global_num_pp_cores] = core;
+                                               mali_global_num_pp_cores++;
+
+                                               return core;
+                                       } else {
+                                               MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+                                       }
+                                       mali_group_remove_pp_core(group);
+                               } else {
+                                       MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+                               }
+                       }
+                       mali_hw_core_delete(&core->hw_core);
+               }
+
+               _mali_osk_free(core);
+       } else {
+               MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+       }
+
+       return NULL;
+}
+
+void mali_pp_delete(struct mali_pp_core *core)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       _mali_osk_irq_term(core->irq);
+       mali_hw_core_delete(&core->hw_core);
+
+       /* Remove core from global list */
+       for (i = 0; i < mali_global_num_pp_cores; i++) {
+               if (mali_global_pp_cores[i] == core) {
+                       mali_global_pp_cores[i] = NULL;
+                       mali_global_num_pp_cores--;
+
+                       if (i != mali_global_num_pp_cores) {
+                               /* We removed a PP core from the middle of the array -- move the last
+                                * PP core to the current position to close the gap */
+                               mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+                               mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+                       }
+
+                       break;
+               }
+       }
+
+       _mali_osk_free(core);
+}
+
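+#if 0
+/* Illustrative sketch (not built): the removal loop above uses the
+ * swap-remove idiom -- instead of shifting every element left, the last
+ * core fills the vacated slot, making deletion O(1) at the cost of
+ * element order. Generic form, not driver code: */
+static void swap_remove(void **array, u32 *count, u32 i)
+{
+       (*count)--;
+       if (i != *count) {
+               array[i] = array[*count]; /* last element closes the gap */
+       }
+       array[*count] = NULL;
+}
+#endif
+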
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       /* Will only send the stop bus command, and not wait for it to complete */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Send the stop bus command. */
+       mali_pp_stop_bus(core);
+
+       /* Wait for bus to be stopped */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+                       break;
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+               if (mali_gp_reset_fail < 65533)
+                       mali_gp_reset_fail++;
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+       0x0, /* Renderer List Address Register */
+       0x0, /* Renderer State Word Base Address Register */
+       0x0, /* Renderer Vertex Base Register */
+       0x2, /* Feature Enable Register */
+       0x0, /* Z Clear Value Register */
+       0x0, /* Stencil Clear Value Register */
+       0x0, /* ABGR Clear Value 0 Register */
+       0x0, /* ABGR Clear Value 1 Register */
+       0x0, /* ABGR Clear Value 2 Register */
+       0x0, /* ABGR Clear Value 3 Register */
+       0x0, /* Bounding Box Left Right Register */
+       0x0, /* Bounding Box Bottom Register */
+       0x0, /* FS Stack Address Register */
+       0x0, /* FS Stack Size and Initial Value Register */
+       0x0, /* Reserved */
+       0x0, /* Reserved */
+       0x0, /* Origin Offset X Register */
+       0x0, /* Origin Offset Y Register */
+       0x75, /* Subpixel Specifier Register */
+       0x0, /* Tiebreak mode Register */
+       0x0, /* Polygon List Format Register */
+       0x0, /* Scaling Register */
+       0x0 /* Tilebuffer configuration Register */
+};
+
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+       0x0, /* WBx Source Select Register */
+       0x0, /* WBx Target Address Register */
+       0x0, /* WBx Target Pixel Format Register */
+       0x0, /* WBx Target AA Format Register */
+       0x0, /* WBx Target Layout */
+       0x0, /* WBx Target Scanline Length */
+       0x0, /* WBx Target Flags Register */
+       0x0, /* WBx MRT Enable Register */
+       0x0, /* WBx MRT Offset Register */
+       0x0, /* WBx Global Test Enable Register */
+       0x0, /* WBx Global Test Reference Value Register */
+       0x0  /* WBx Global Test Compare Function Register */
+};
+
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+extern int pp_hardware_reset;
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+       /* Bus must be stopped before calling this function */
+       const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+       const u32 reset_invalid_value = 0xC0FFE000;
+       const u32 reset_check_value = 0xC01A0000;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+       MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+       pp_hardware_reset++;
+
+       /* Set register to a bogus value. The register will be used to detect when reset is complete */
+       mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+       /* Force core to reset */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+       /* Wait for reset to be complete */
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+                       break;
+               }
+       }
+
+       if (MALI_REG_POLL_COUNT_FAST == i) {
+               MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+       }
+
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+       int i;
+       u32 rawstat = 0;
+
+       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+               u32 status =  mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+               if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+                       rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+                       if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
+                               break;
+                       }
+               }
+       }
+
+       if (i == MALI_REG_POLL_COUNT_FAST) {
+               MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+                                 core->hw_core.description, rawstat));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Re-enable interrupts */
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+       mali_pp_reset_async(core);
+       return mali_pp_reset_wait(core);
+}
+
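+#if 0
+/* Illustrative sketch (not built): splitting reset into an asynchronous
+ * kick-off and a separate wait lets a caller soft-reset several cores in
+ * parallel instead of serializing the polls. Hypothetical caller: */
+static _mali_osk_errcode_t example_reset_all(struct mali_pp_core **cores, u32 n)
+{
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+       u32 i;
+
+       for (i = 0; i < n; i++) {
+               mali_pp_reset_async(cores[i]); /* kick off all resets */
+       }
+
+       for (i = 0; i < n; i++) {
+               if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(cores[i])) {
+                       err = _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       return err;
+}
+#endif
+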
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+       u32 relative_address;
+       u32 start_index;
+       u32 nr_of_regs;
+       u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+       u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+       u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+       u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+       MALI_DEBUG_ASSERT_POINTER(core);
+
+       /* Write frame registers */
+
+       /*
+        * There are two frame registers which are different for each sub job:
+        * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+        * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+        */
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+       /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+       if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+       }
+
+       /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+       relative_address = MALI200_REG_ADDR_RSW;
+       start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+       nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+                       relative_address, &frame_registers[start_index],
+                       nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* MALI200_REG_ADDR_STACK_SIZE */
+       relative_address = MALI200_REG_ADDR_STACK_SIZE;
+       start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+       mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+                       relative_address, frame_registers[start_index],
+                       mali_frame_registers_reset_values[start_index]);
+
+       /* Skip 2 reserved registers */
+
+       /* Write remaining registers */
+       relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+       start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+       nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+       mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+                       relative_address, &frame_registers[start_index],
+                       nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+       /* Write WBx registers */
+       if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+               mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+               mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+       }
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+       if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+               mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+               mali_hw_core_register_write_relaxed(&core->hw_core,  MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+       }
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+       MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+       /* Add a barrier to make sure all register writes are finished */
+       _mali_osk_write_mem_barrier();
+
+       /* This is the command that starts the core.
+        *
+        * Don't actually run the job if PROFILING_SKIP_PP_JOBS is set; just
+        * force the core to assert the completion interrupt.
+        */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+
+       /* Add a barrier to make sure the previous register writes are finished */
+       _mali_osk_write_mem_barrier();
+}
+
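+#if 0
+/* Illustrative sketch (not built): the *_conditional write helpers used
+ * above exploit the fact that the core was just reset -- a register whose
+ * desired value equals its reset value already holds it, so the write
+ * (and its bus transaction) can be skipped on the hot job-start path.
+ * In spirit, with write_reg passed in as a placeholder callback: */
+static void write_conditional(void (*write_reg)(u32 addr, u32 val),
+                             u32 addr, u32 value, u32 reset_value)
+{
+       if (value != reset_value) {
+               write_reg(addr, value);
+       }
+}
+#endif
+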
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
+{
+       if (mali_global_num_pp_cores > index) {
+               return mali_global_pp_cores[index];
+       }
+
+       return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+       return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR);
+       _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+       struct mali_pp_core *core = (struct mali_pp_core *)data;
+       u32 irq_readout;
+
+       irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+       if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR);
+               _mali_osk_mem_barrier();
+               return _MALI_OSK_ERR_OK;
+       }
+
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+       MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+       MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+}
+#endif
+
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+       u32 val0 = 0;
+       u32 val1 = 0;
+       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+       int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+               mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(counter_index, val0);
+               _mali_osk_profiling_record_global_counters(counter_index, val0);
+#endif
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+               mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+               _mali_osk_profiling_record_global_counters(counter_index + 1, val1);
+#endif
+       }
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+       return n;
+}
+#endif
diff --git a/utgard/r8p0/common/mali_pp.h b/utgard/r8p0/common/mali_pp.h
new file mode 100755 (executable)
index 0000000..7633ecd
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES        9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+       struct mali_hw_core  hw_core;           /**< Common for all HW cores */
+       _mali_osk_irq_t     *irq;               /**< IRQ handler */
+       u32                  core_id;           /**< Unique core ID */
+       u32                  bcast_id;          /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return core->core_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return core->bcast_id;
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child are always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters were enabled
+ * @param child The core to actually read the values from
+ * @param job Job object to update with counter values (if enabled)
+ * @param subjob Which sub job the counters apply to (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
+{
+       return core->hw_core.description;
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
+{
+       u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+                          MALI200_REG_VAL_IRQ_MASK_USED;
+       if (0 == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS;
+       }
+
+       return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
+{
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core,
+                                         MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+
+MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core)
+{
+       u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+       return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+               struct mali_pp_job *job, u32 subjob)
+{
+       u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
+}
+
+
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+       u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
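+#if 0
+/* Illustrative sketch (not built): a hypothetical bottom-half using the
+ * classification helper above; real group handling is elided. */
+static void example_pp_irq_bottom_half(struct mali_pp_core *core)
+{
+       switch (mali_pp_get_interrupt_result(core)) {
+       case MALI_INTERRUPT_RESULT_SUCCESS:
+               /* Job done: read out counters, then re-arm the IRQ. */
+               mali_pp_enable_interrupts(core);
+               break;
+       case MALI_INTERRUPT_RESULT_NONE:
+               /* Spurious interrupt: nothing to do. */
+               break;
+       default:
+               /* Error: quiesce the core until it is reset. */
+               mali_pp_mask_all_interrupts(core);
+               break;
+       }
+}
+#endif
+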
+#endif /* __MALI_PP_H__ */
diff --git a/utgard/r8p0/common/mali_pp_job.c b/utgard/r8p0/common/mali_pp_job.c
new file mode 100755 (executable)
index 0000000..c2ed210
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+#include "mali_memory_swap_alloc.h"
+#include "mali_scheduler.h"
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+void mali_pp_job_initialize(void)
+{
+       _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+void mali_pp_job_terminate(void)
+{
+       _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+                                      _mali_uk_pp_start_job_s __user *uargs, u32 id)
+{
+       struct mali_pp_job *job;
+       u32 perf_counter_flag;
+
+       job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+       if (NULL != job) {
+
+               _mali_osk_list_init(&job->list);
+               _mali_osk_list_init(&job->session_fb_lookup_list);
+               _mali_osk_atomic_inc(&session->number_of_pp_jobs);
+
+               if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+                       goto fail;
+               }
+
+               if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+                       MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+                       goto fail;
+               }
+
+               if (!mali_pp_job_use_no_notification(job)) {
+                       job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+                       if (NULL == job->finished_notification) goto fail;
+               }
+
+               perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+               /* No counters came from user space, so pass the
+                * debugfs / DS-5 provided global ones to the job object */
+               if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+                     (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+                       u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+                       /* These counters apply for all virtual jobs, and where no per sub job counter is specified */
+                       job->uargs.perf_counter_src0 = pp_counter_src0;
+                       job->uargs.perf_counter_src1 = pp_counter_src1;
+
+                       /* We only copy the per sub job array if it is enabled with at least one counter */
+                       if (0 < sub_job_count) {
+                               job->perf_counter_per_sub_job_count = sub_job_count;
+                               _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+                               _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+                       }
+               }
+
+               job->session = session;
+               job->id = id;
+
+               job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+               job->pid = _mali_osk_get_pid();
+               job->tid = _mali_osk_get_tid();
+
+               _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+               _mali_osk_atomic_init(&job->sub_job_errors, 0);
+               job->swap_status = MALI_NO_SWAP_IN;
+               job->user_notification = MALI_FALSE;
+               job->num_pp_cores_in_virtual = 0;
+
+               if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) {
+                       MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n"));
+                       goto fail;
+               }
+
+               if (job->uargs.num_memory_cookies > 0) {
+                       u32 size;
+                       u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
+
+                       size = sizeof(*memory_cookies) * (job->uargs.num_memory_cookies);
+
+                       job->memory_cookies = _mali_osk_malloc(size);
+                       if (NULL == job->memory_cookies) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+                               goto fail;
+                       }
+
+                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
+                               MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+                               goto fail;
+                       }
+               }
+
+               if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+                       /* Not a valid job. */
+                       goto fail;
+               }
+
+               mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+               mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+               mali_mem_swap_in_pages(job);
+
+               return job;
+       }
+
+fail:
+       if (NULL != job) {
+               mali_pp_job_delete(job);
+       }
+
+       return NULL;
+}
+
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+       session = mali_pp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL != job->memory_cookies) {
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+               /* Unmap buffers attached to job */
+               mali_dma_buf_unmap_job(job);
+#endif
+               if (MALI_NO_SWAP_IN != job->swap_status) {
+                       mali_mem_swap_out_pages(job);
+               }
+
+               _mali_osk_free(job->memory_cookies);
+       }
+
+       if (job->user_notification) {
+               mali_scheduler_return_pp_job_to_user(job,
+                                                    job->num_pp_cores_in_virtual);
+       }
+
+       if (NULL != job->finished_notification) {
+               _mali_osk_notification_delete(job->finished_notification);
+       }
+
+       _mali_osk_atomic_term(&job->sub_jobs_completed);
+       _mali_osk_atomic_term(&job->sub_job_errors);
+       _mali_osk_atomic_dec(&session->number_of_pp_jobs);
+       _mali_osk_free(job);
+
+       _mali_osk_wait_queue_wake_up(session->wait_queue);
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+       struct mali_pp_job *iter;
+       struct mali_pp_job *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       /* Find position in list/queue where job should be added. */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+                                           struct mali_pp_job, list) {
+               /* job should be started after iter if iter is in progress. */
+               if (0 < iter->sub_jobs_started) {
+                       break;
+               }
+
+               /*
+                * job should be started after iter if it has a higher
+                * job id. A span is used to handle job id wrapping.
+                */
+               if ((mali_pp_job_get_id(job) -
+                    mali_pp_job_get_id(iter)) <
+                   MALI_SCHEDULER_JOB_ID_SPAN) {
+                       break;
+               }
+       }
+
+       _mali_osk_list_add(&job->list, &iter->list);
+}
+
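+#if 0
+/* Illustrative sketch (not built): the span test above relies on unsigned
+ * wraparound. For u32 ids, (a - b) is computed mod 2^32, so an id that
+ * logically precedes iter's produces a huge difference and fails the
+ * "< MALI_SCHEDULER_JOB_ID_SPAN" test even across the 0xFFFFFFFF -> 0
+ * wrap. EXAMPLE_SPAN is illustrative, not the driver's constant. */
+#define EXAMPLE_SPAN 0x7fffffffu
+
+static int example_id_is_after(u32 a, u32 b)
+{
+       /* example_id_is_after(2, 0xfffffffe) == 1, since 2 - 0xfffffffe
+        * wraps around to 4 */
+       return (a - b) < EXAMPLE_SPAN;
+}
+#endif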
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+       /* Virtual jobs always use the global job counter, as do jobs with no per sub job counters at all */
+       if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+               return job->uargs.perf_counter_src0;
+       }
+
+       /* Use per sub job counter if enabled... */
+       if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+               return job->perf_counter_per_sub_job_src0[sub_job];
+       }
+
+       /* ...else default to global job counter */
+       return job->uargs.perf_counter_src0;
+}
+
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+       /* Virtual jobs always use the global job counter, as do jobs with no per sub job counters at all */
+       if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+               return job->uargs.perf_counter_src1;
+       }
+
+       /* Use per sub job counter if enabled... */
+       if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+               return job->perf_counter_per_sub_job_src1[sub_job];
+       }
+
+       /* ...else default to global job counter */
+       return job->uargs.perf_counter_src1;
+}
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+       pp_counter_src0 = counter;
+}
+
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+       pp_counter_src1 = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+       if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+               /* increment count since existing counter was disabled */
+               _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == counter) {
+               /* decrement count since new counter is disabled */
+               _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+       }
+
+       /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and then dec, for a net change of zero */
+
+       pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+       if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+               /* increment count since existing counter was disabled */
+               _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == counter) {
+               /* decrement count since new counter is disabled */
+               _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+       }
+
+       /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and then dec, for a net change of zero */
+
+       pp_counter_per_sub_job_src1[sub_job] = counter;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+       return pp_counter_src0;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+       return pp_counter_src1;
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+       return pp_counter_per_sub_job_src0[sub_job];
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+       MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+       return pp_counter_per_sub_job_src1[sub_job];
+}
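+
+#if 0
+/* Illustrative sketch (not built): the setters above keep
+ * pp_counter_per_sub_job_count equal to the number of enabled entries so
+ * that job creation can skip copying the arrays when it is zero.
+ * Hypothetical debugfs-style usage; the counter source id 5 is an
+ * arbitrary example value. */
+static void example_enable_counters(void)
+{
+       mali_pp_job_set_pp_counter_sub_job_src0(0, 5);
+       mali_pp_job_set_pp_counter_sub_job_src0(1, 5);
+       /* pp_counter_per_sub_job_count is now 2; new jobs copy the array */
+
+       mali_pp_job_set_pp_counter_sub_job_src0(0, MALI_HW_CORE_NO_COUNTER);
+       /* back to 1: sub job 0 is disabled again */
+}
+#endif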
diff --git a/utgard/r8p0/common/mali_pp_job.h b/utgard/r8p0/common/mali_pp_job.h
new file mode 100755 (executable)
index 0000000..0ebd138
--- /dev/null
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "linux/mali_dma_fence.h"
+#endif
+
+typedef enum pp_job_status {
+       MALI_NO_SWAP_IN,
+       MALI_SWAP_IN_FAIL,
+       MALI_SWAP_IN_SUCC,
+} pp_job_status;
+
+/**
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the PP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_pp_job {
+       /*
+        * These members are typically only set at creation,
+        * and only read later on.
+        * They do not require any lock protection.
+        */
+       _mali_uk_pp_start_job_s uargs;                     /**< Arguments from user space */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
+       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+       u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
+       struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+       u32 perf_counter_per_sub_job_count;                /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+       u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+       u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+       u32 sub_jobs_num;                                  /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+       pp_job_status swap_status;                         /**< Tracks this PP job's swap-in status; on failure the scheduler must drop the job */
+       mali_bool user_notification;                       /**< For deferred PP job deletion: whether a job-finished notification must still be sent to user space */
+       u32 num_pp_cores_in_virtual;                       /**< Number of PP cores in the virtual group when the job finished */
+
+       /*
+        * These members are used by both scheduler and executor.
+        * They are "protected" by atomic operations.
+        */
+       _mali_osk_atomic_t sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
+       _mali_osk_atomic_t sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+
+       /*
+        * These members are used by scheduler, but only when no one else
+        * knows about this job object but the working function.
+        * No lock is thus needed for these.
+        */
+       u32 *memory_cookies;                               /**< Memory cookies attached to job */
+
+       /*
+        * These members are used by the scheduler,
+        * protected by scheduler lock
+        */
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+       _mali_osk_list_t session_fb_lookup_list;           /**< Used to link jobs together from the same frame builder in the session */
+
+       u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
+
+       /*
+        * Set by executor/group on job completion, read by scheduler when
+        * returning job to user. Hold executor lock when setting,
+        * no lock needed when reading
+        */
+       u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+       u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       struct mali_dma_fence_context dma_fence_context; /**< The mali dma fence context recording the dma fence waiters that this job waits for */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       struct dma_fence *rendered_dma_fence; /**< The new dma fence linked to this job */
+#else
+       struct fence *rendered_dma_fence; /**< The new dma fence linked to this job */
+#endif
+#endif
+};
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+               u32 cache_order)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.dlbu_registers;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif
+}
+
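+/*
+ * Frame address selection: a virtual job renders through the DLBU and so
+ * uses the fixed DLBU virtual address; sub-job 0 of a physical job takes
+ * its address directly from the frame registers, while later sub-jobs use
+ * the per-sub-job address array supplied by user space.
+ */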
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (mali_pp_job_is_virtual(job)) {
+               return MALI_DLBU_VIRT_ADDR;
+       } else if (0 == sub_job) {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+       } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+               return job->uargs.frame_registers_addr_frame[sub_job - 1];
+       }
+
+       return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (0 == sub_job) {
+               return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+       } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+               return job->uargs.frame_registers_addr_stack[sub_job - 1];
+       }
+
+       return 0;
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+               _mali_osk_list_t *list)
+{
+       _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+               _mali_osk_list_t *list)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+       _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb2_registers;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+           job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+           job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+          ) {
+               /* At least one output unit active */
+               return MALI_FALSE;
+       }
+
+       /* All outputs are disabled - we can abort the job */
+       return MALI_TRUE;
+}
+
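+/*
+ * Queued jobs are kept in a small per-session lookup table, bucketed by
+ * the masked frame builder id. This lets _mali_ukk_pp_job_disable_wb()
+ * quickly find every queued job belonging to a given frame builder when
+ * its writeback should be discarded.
+ */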
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
+{
+       u32 fb_lookup_id;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+
+       MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+
+       _mali_osk_list_addtail(&job->session_fb_lookup_list,
+                              &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->session_fb_lookup_list);
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when terminating a session that still has queued jobs: marks all
+   unstarted sub-jobs as started and failed, so that no new sub-jobs can
+   be started for this job. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+       u32 jobs_remaining;
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+       job->sub_jobs_started += jobs_remaining;
+
+       /* Not the most optimal way, but this is only used in error cases */
+       for (i = 0; i < jobs_remaining; i++) {
+               _mali_osk_atomic_inc(&job->sub_jobs_completed);
+               _mali_osk_atomic_inc(&job->sub_job_errors);
+       }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->sub_jobs_num ==
+               _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+              MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+       return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+       struct mali_pp_job *job, u32 index)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+       MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+       return job->memory_cookies[index];
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (0 < job->uargs.num_memory_cookies) {
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       /* Assert that we are marking the "first unstarted sub job" as started */
+       MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+       job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_atomic_inc(&job->sub_jobs_completed);
+       if (MALI_FALSE == success) {
+               _mali_osk_atomic_inc(&job->sub_job_errors);
+       }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) {
+               return MALI_TRUE;
+       }
+       return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+       struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+              MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+       /*
+        * A pilot job is currently identified as a job which
+        * requires no callback notification.
+        */
+       return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+       notification = job->finished_notification;
+       job->finished_notification = NULL;
+
+       return notification;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+       struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+              ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_protected_job(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.flags & _MALI_PP_JOB_FLAG_PROTECTED)
+              ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       job->perf_counter_value1[sub_job] = value;
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+
+       return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return &(job->tracker);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+       struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/utgard/r8p0/common/mali_scheduler.c b/utgard/r8p0/common/mali_scheduler.c
new file mode 100755 (executable)
index 0000000..5410f86
--- /dev/null
@@ -0,0 +1,1548 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include "mali_pm_metrics.h"
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "mali_dma_fence.h"
+#include <linux/dma-buf.h>
+#endif
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+/*
+ * ---------- static defines/constants ----------
+ */
+
+/*
+ * If dma_buf with map on demand is used, we defer job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
+
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
+
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+/*
+ * ---------- static variables ----------
+ */
+
+_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+       struct mali_session_data *session, struct mali_gp_job *job);
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+       struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+               mali_bool success);
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+void mali_scheduler_do_pp_job_delete(void *arg);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+       _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+       _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+       job_queue_gp.depth = 0;
+       job_queue_gp.big_job_num = 0;
+
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+       job_queue_pp.depth = 0;
+       job_queue_pp.big_job_num = 0;
+
+       mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+                                         _MALI_OSK_LOCKFLAG_ORDERED,
+                                         _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == mali_scheduler_lock_obj) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+                                            mali_scheduler_do_pp_job_delete, NULL);
+       if (NULL == scheduler_wq_pp_job_delete) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+                                              _MALI_OSK_LOCKFLAG_ORDERED,
+                                              _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == scheduler_pp_job_delete_lock) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+                                           mali_scheduler_do_pp_job_queue, NULL);
+       if (NULL == scheduler_wq_pp_job_queue) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+                                             _MALI_OSK_LOCKFLAG_ORDERED,
+                                             _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == scheduler_pp_job_queue_lock) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       if (NULL != scheduler_pp_job_queue_lock) {
+               _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+               scheduler_pp_job_queue_lock = NULL;
+       }
+
+       if (NULL != scheduler_wq_pp_job_queue) {
+               _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+               scheduler_wq_pp_job_queue = NULL;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+       if (NULL != scheduler_pp_job_delete_lock) {
+               _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+               scheduler_pp_job_delete_lock = NULL;
+       }
+
+       if (NULL != scheduler_wq_pp_job_delete) {
+               _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+               scheduler_wq_pp_job_delete = NULL;
+       }
+
+       if (NULL != mali_scheduler_lock_obj) {
+               _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+               mali_scheduler_lock_obj = NULL;
+       }
+
+       _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+       _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
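+/*
+ * Walk the PP queues and count how many physical sub-jobs could be
+ * started right now, considering only jobs whose protected-mode
+ * requirement matches the current GPU mode (presumably used by the
+ * executor to decide how many physical groups to activate).
+ */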
+u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure)
+{
+       /*
+        * Count how many physical sub-jobs are present from the head of the
+        * queue until the first virtual job is found.
+        * Early out when we have reached the maximum number of PP cores (8).
+        */
+       u32 count = 0;
+       struct mali_pp_job *job;
+       struct mali_pp_job *temp;
+
+       /* Check for partially started normal pri jobs */
+       if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                          struct mali_pp_job, list);
+
+               MALI_DEBUG_ASSERT_POINTER(job);
+
+               if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+                       /*
+                        * Remember: virtual jobs can't be queued and started
+                        * at the same time, so this must be a physical job
+                        */
+                       if ((MALI_FALSE  == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+                           || (MALI_TRUE  == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job))) {
+
+                               count += mali_pp_job_unstarted_sub_job_count(job);
+                               if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+                                       return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                               }
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+                                   struct mali_pp_job, list) {
+               if ((MALI_FALSE == mali_pp_job_is_virtual(job))
+                   && ((MALI_FALSE  == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+                       || (MALI_TRUE  == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
+
+                       count += mali_pp_job_unstarted_sub_job_count(job);
+                       if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+                               return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                       }
+               } else {
+                       /* Came across a virtual job, so stop counting */
+                       return count;
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+                                   struct mali_pp_job, list) {
+               if ((MALI_FALSE == mali_pp_job_is_virtual(job))
+                   && (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job))
+                   && ((MALI_FALSE  == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job))
+                       || (MALI_TRUE  == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) {
+
+                       count += mali_pp_job_unstarted_sub_job_count(job);
+                       if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+                               return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                       }
+               } else {
+                       /* Came across a virtual job, so stop counting */
+                       return count;
+               }
+       }
+       return count;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_next(void)
+{
+       struct mali_pp_job *job;
+       struct mali_pp_job *temp;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       /* Check for partially started normal pri jobs */
+       if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                          struct mali_pp_job, list);
+
+               MALI_DEBUG_ASSERT_POINTER(job);
+
+               if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+                       return job;
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+                                   struct mali_pp_job, list) {
+               return job;
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+                                   struct mali_pp_job, list) {
+               return job;
+       }
+
+       return NULL;
+}
+
+mali_bool mali_scheduler_job_next_is_virtual(void)
+{
+       struct mali_pp_job *job;
+
+       job = mali_scheduler_job_pp_virtual_peek();
+       if (NULL != job) {
+               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
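+/*
+ * Pop the next GP job, preferring the high priority queue. If dequeuing
+ * a "big" job brings the pending big-job count back under the limit,
+ * submitters blocked on the session wait queue are woken up.
+ */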
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
+{
+       _mali_osk_list_t *queue;
+       struct mali_gp_job *job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+       MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+       MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth);
+
+       if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+               queue = &job_queue_gp.high_pri;
+       } else {
+               queue = &job_queue_gp.normal_pri;
+               MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+       }
+
+       job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       mali_gp_job_list_remove(job);
+       job_queue_gp.depth--;
+       if (job->big_job) {
+               job_queue_gp.big_job_num--;
+               if (job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) {
+                       /* wake up process */
+                       wait_queue_head_t *queue = mali_session_get_wait_queue();
+                       wake_up(queue);
+               }
+       }
+       return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
+{
+       struct mali_pp_job *job = NULL;
+       struct mali_pp_job *tmp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       /*
+        * For PP jobs we favour partially started jobs in normal
+        * priority queue over unstarted jobs in high priority queue
+        */
+
+       if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                              struct mali_pp_job, list);
+               MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+               if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+                       job = tmp_job;
+               }
+       }
+
+       if (NULL == job ||
+           MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+               /*
+                * There isn't a partially started job in normal queue, so
+                * look in high priority queue.
+                */
+               if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+                       MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+                       tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+                                                      struct mali_pp_job, list);
+                       MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+                       if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+                               job = tmp_job;
+                       }
+               }
+       }
+
+       return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+       struct mali_pp_job *job = NULL;
+       struct mali_pp_job *tmp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+                                              struct mali_pp_job, list);
+
+               if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+                       job = tmp_job;
+               }
+       }
+
+       if (NULL == job) {
+               if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+                       MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+                       tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                                      struct mali_pp_job, list);
+
+                       if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+                               job = tmp_job;
+                       }
+               }
+       }
+
+       return job;
+}
+
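+/*
+ * Hand out one physical sub-job. The job itself stays on the queue until
+ * its last sub-job has been handed out; queue depth is decremented once
+ * per sub-job retrieved.
+ */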
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+       struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+       if (NULL != job) {
+               *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+               mali_pp_job_mark_sub_job_started(job, *sub_job);
+               if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+                       /* Remove from queue when last sub job has been retrieved */
+                       mali_pp_job_list_remove(job);
+               }
+
+               job_queue_pp.depth--;
+
+               /*
+                * Job is about to start, so it is no longer
+                * possible to discard its WB
+                */
+               mali_pp_job_fb_lookup_remove(job);
+       }
+
+       return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+       struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+       if (NULL != job) {
+               MALI_DEBUG_ASSERT(0 ==
+                                 mali_pp_job_get_first_unstarted_sub_job(job));
+               MALI_DEBUG_ASSERT(1 ==
+                                 mali_pp_job_get_sub_job_count(job));
+
+               mali_pp_job_mark_sub_job_started(job, 0);
+
+               mali_pp_job_list_remove(job);
+
+               job_queue_pp.depth--;
+
+               /*
+                * Job is about to start, so it is no longer
+                * possible to discard its WB
+                */
+               mali_pp_job_fb_lookup_remove(job);
+       }
+
+       return job;
+}
+
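+/*
+ * Called from the Timeline system when all of a GP job's dependencies
+ * have been resolved. If the job cannot be queued (e.g. the session is
+ * aborting), it is completed immediately with an error so user space is
+ * notified and the job object is freed.
+ */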
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+                            mali_gp_job_get_id(job), job));
+
+       mali_scheduler_lock();
+
+       if (!mali_scheduler_queue_gp_job(job)) {
+               /* Failed to enqueue job, release job (with error) */
+
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_gp_job(job, MALI_FALSE,
+                                              MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       mali_scheduler_unlock();
+
+       return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+                            mali_pp_job_get_id(job), job));
+
+       if (MALI_TRUE == mali_timeline_tracker_activation_error(
+                   mali_pp_job_get_tracker(job))) {
+               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+                                    mali_pp_job_get_id(job), job));
+
+               mali_scheduler_lock();
+               mali_pp_job_fb_lookup_remove(job);
+               mali_pp_job_mark_unstarted_failed(job);
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       if (mali_pp_job_needs_dma_buf_mapping(job)) {
+               mali_scheduler_deferred_pp_job_queue(job);
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+       mali_scheduler_lock();
+
+       if (!mali_scheduler_queue_pp_job(job)) {
+               /* Failed to enqueue job, release job (with error) */
+               mali_pp_job_fb_lookup_remove(job);
+               mali_pp_job_mark_unstarted_failed(job);
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       mali_scheduler_unlock();
+       return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+                                   mali_bool success,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued)
+{
+       if (user_notification) {
+               mali_scheduler_return_gp_job_to_user(job, success);
+       }
+
+       if (dequeued) {
+               _mali_osk_pm_dev_ref_put();
+
+               if (mali_utilization_enabled()) {
+                       mali_utilization_gp_end();
+               }
+               mali_pm_record_gpu_idle(MALI_TRUE);
+       }
+
+       mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+                                   u32 num_cores_in_virtual,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued)
+{
+       job->user_notification = user_notification;
+       job->num_pp_cores_in_virtual = num_cores_in_virtual;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       if (NULL != job->rendered_dma_fence)
+               mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+#endif
+
+       if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+               if (mali_pp_job_is_window_surface(job)) {
+                       struct mali_session_data *session;
+                       session = mali_pp_job_get_session(job);
+                       mali_session_inc_num_window_jobs(session);
+               }
+#endif
+               _mali_osk_pm_dev_ref_put();
+
+               if (mali_utilization_enabled()) {
+                       mali_utilization_pp_end();
+               }
+               mali_pm_record_gpu_idle(MALI_FALSE);
+       }
+
+       /* With the ZRAM feature enabled, all PP jobs are forced to use deferred deletion. */
+       mali_scheduler_deferred_pp_job_delete(job);
+}
+
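+/*
+ * Session teardown: under the scheduler lock, move all queued (not yet
+ * running) jobs belonging to the session onto local lists, then release
+ * their trackers and complete them with an error outside the lock, since
+ * releasing a tracker may queue new jobs.
+ */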
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+       struct mali_gp_job *gp_job;
+       struct mali_gp_job *gp_tmp;
+       struct mali_pp_job *pp_job;
+       struct mali_pp_job *pp_tmp;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+                            session));
+
+       mali_scheduler_lock();
+
+       /* Remove from GP normal priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+                                   struct mali_gp_job, list) {
+               if (mali_gp_job_get_session(gp_job) == session) {
+                       mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+                       job_queue_gp.depth--;
+                       job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+               }
+       }
+
+       /* Remove from GP high priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+                                   struct mali_gp_job, list) {
+               if (mali_gp_job_get_session(gp_job) == session) {
+                       mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+                       job_queue_gp.depth--;
+                       job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+               }
+       }
+
+       /* Remove from PP normal priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+                                   &job_queue_pp.normal_pri,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_get_session(pp_job) == session) {
+                       mali_pp_job_fb_lookup_remove(pp_job);
+
+                       job_queue_pp.depth -=
+                               mali_pp_job_unstarted_sub_job_count(
+                                       pp_job);
+                       mali_pp_job_mark_unstarted_failed(pp_job);
+
+                       if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+                               if (mali_pp_job_is_complete(pp_job)) {
+                                       mali_pp_job_list_move(pp_job,
+                                                             &removed_jobs_pp);
+                               } else {
+                                       mali_pp_job_list_remove(pp_job);
+                               }
+                       }
+               }
+       }
+
+       /* Remove from PP high priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+                                   &job_queue_pp.high_pri,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_get_session(pp_job) == session) {
+                       mali_pp_job_fb_lookup_remove(pp_job);
+
+                       job_queue_pp.depth -=
+                               mali_pp_job_unstarted_sub_job_count(
+                                       pp_job);
+                       mali_pp_job_mark_unstarted_failed(pp_job);
+
+                       if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+                               if (mali_pp_job_is_complete(pp_job)) {
+                                       mali_pp_job_list_move(pp_job,
+                                                             &removed_jobs_pp);
+                               } else {
+                                       mali_pp_job_list_remove(pp_job);
+                               }
+                       }
+               }
+       }
+
+       /*
+        * Release scheduler lock so we can release trackers
+        * (which will potentially queue new jobs)
+        */
+       mali_scheduler_unlock();
+
+       /* Release and complete all (non-running) found GP jobs  */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+                                   struct mali_gp_job, list) {
+               mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+               mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+               _mali_osk_list_delinit(&gp_job->list);
+               mali_scheduler_complete_gp_job(gp_job,
+                                              MALI_FALSE, MALI_TRUE, MALI_TRUE);
+       }
+
+       /* Release and complete non-running PP jobs */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+                                   struct mali_pp_job, list) {
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+               _mali_osk_list_delinit(&pp_job->list);
+               mali_scheduler_complete_pp_job(pp_job, 0,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+               _mali_uk_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+       mali_timeline_point point;
+       u32 __user *point_ptr = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)ctx;
+
+       job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+                                NULL);
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+       point = mali_scheduler_submit_gp_job(session, job);
+
+       if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+               /*
+                * Let user space know that something failed
+                * after the job was started.
+                */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+               _mali_uk_pp_start_job_s *uargs)
+{
+       _mali_osk_errcode_t ret;
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       mali_timeline_point point;
+       u32 __user *point_ptr = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)ctx;
+
+       job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+       /* Submit PP job. */
+       ret = mali_scheduler_submit_pp_job(session, job, &point);
+       job = NULL;
+
+       if (_MALI_OSK_ERR_OK == ret) {
+               if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+                       /*
+                        * Let user space know that something failed
+                        * after the jobs were started.
+                        */
+                       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+               }
+       }
+
+       return ret;
+}
+
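+/*
+ * Combined submission path: user space passes both a GP and a PP job in
+ * one call. The GP job is created with a reference to the PP job's
+ * tracker, so the PP job can be gated on the GP job's completion, and
+ * only the PP job's timeline point is written back to user space.
+ */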
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+               _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+       _mali_osk_errcode_t ret;
+       struct mali_session_data *session;
+       _mali_uk_pp_and_gp_start_job_s kargs;
+       struct mali_pp_job *pp_job;
+       struct mali_gp_job *gp_job;
+       u32 __user *point_ptr = NULL;
+       mali_timeline_point point;
+       _mali_uk_pp_start_job_s __user *pp_args;
+       _mali_uk_gp_start_job_s __user *gp_args;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+
+       session = (struct mali_session_data *) ctx;
+
+       if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+                                         sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+       gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+       pp_job = mali_pp_job_create(session, pp_args,
+                                   mali_scheduler_get_new_id());
+       if (NULL == pp_job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       gp_job = mali_gp_job_create(session, gp_args,
+                                   mali_scheduler_get_new_id(),
+                                   mali_pp_job_get_tracker(pp_job));
+       if (NULL == gp_job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               mali_pp_job_delete(pp_job);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+       /* Submit GP job. */
+       mali_scheduler_submit_gp_job(session, gp_job);
+       gp_job = NULL;
+
+       /* Submit PP job. */
+       ret = mali_scheduler_submit_pp_job(session, pp_job, &point);
+       pp_job = NULL;
+
+       if (_MALI_OSK_ERR_OK == ret) {
+               if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+                       /*
+                        * Let user space know that something failed
+                        * after the jobs were started.
+                        */
+                       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+               }
+       }
+
+       return ret;
+}
+
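+/*
+ * Disable writeback for queued jobs whose output has become obsolete:
+ * every queued job matching the given frame builder id has each
+ * writeback unit whose source address matches disabled before the job
+ * starts.
+ */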
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       u32 fb_lookup_id;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+       mali_scheduler_lock();
+
+       /* Iterate over all jobs for given frame builder_id. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+                                   &session->pp_job_fb_lookup_list[fb_lookup_id],
+                                   struct mali_pp_job, session_fb_lookup_list) {
+               MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+               if (mali_pp_job_get_frame_builder_id(job) !=
+                   (u32) args->fb_id) {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+                       continue;
+               }
+
+               MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+               if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+                       mali_pp_job_disable_wb0(job);
+               }
+
+               if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+                       mali_pp_job_disable_wb1(job);
+               }
+
+               if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+                       mali_pp_job_disable_wb2(job);
+               }
+               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+                                    disable_mask));
+       }
+
+       mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tQueue depth: %u\n", job_queue_gp.depth);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tNormal priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+                               "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tHigh priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+                               "empty" : "not empty");
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "PP queues\n");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tQueue depth: %u\n", job_queue_pp.depth);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tNormal priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_pp.normal_pri)
+                               ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tHigh priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_pp.high_pri)
+                               ? "empty" : "not empty");
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+       struct mali_session_data *session, struct mali_gp_job *job)
+{
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Add job to Timeline system. */
+       point = mali_timeline_system_add_tracker(session->timeline_system,
+                       mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+       return point;
+}
+
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+       struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point)
+
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       struct ww_acquire_ctx ww_actx;
+       u32 i;
+       u32 num_memory_cookies = 0;
+       struct reservation_object **reservation_object_list = NULL;
+       unsigned int num_reservation_object = 0;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       mali_scheduler_lock();
+       /*
+        * Adding job to the lookup list used to quickly discard
+        * writeback units of queued jobs.
+        */
+       mali_pp_job_fb_lookup_add(job);
+       mali_scheduler_unlock();
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+
+       /* Allocate reservation_object_list to hold the dma reservation objects of the dependent dma buffers */
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+       if (0 < num_memory_cookies) {
+               reservation_object_list = kzalloc(sizeof(struct reservation_object *) * num_memory_cookies, GFP_KERNEL);
+               if (NULL == reservation_object_list) {
+                       MALI_PRINT_ERROR(("Failed to alloc the reservation object list.\n"));
+                       ret = _MALI_OSK_ERR_NOMEM;
+                       goto failed_to_alloc_reservation_object_list;
+               }
+       }
+
+       /* Add each dma reservation object to reservation_object_list */
+       for (i = 0; i < num_memory_cookies; i++) {
+               mali_mem_backend *mem_backend = NULL;
+               struct reservation_object *tmp_reservation_object = NULL;
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+
+               mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+               MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+               if (NULL == mem_backend) {
+                       MALI_PRINT_ERROR(("Failed to find the memory backend for memory cookie[%d].\n", i));
+                       goto failed_to_find_mem_backend;
+               }
+
+               if (MALI_MEM_DMA_BUF != mem_backend->type)
+                       continue;
+
+               tmp_reservation_object = mem_backend->dma_buf.attachment->buf->resv;
+
+               if (NULL != tmp_reservation_object) {
+                       mali_dma_fence_add_reservation_object_list(tmp_reservation_object,
+                                       reservation_object_list, &num_reservation_object);
+               }
+       }
+
+       /*
+        * Add the mali dma fence callback to wait for all dependent dma bufs,
+        * extend the timeline system to support dma fences, and create a new
+        * internal dma fence to replace the previous dma fences of all
+        * dependent dma bufs.
+        */
+       if (0 < num_reservation_object) {
+               int error;
+               int num_dma_fence_waiter = 0;
+               /* Create a new dma fence. */
+               job->rendered_dma_fence = mali_dma_fence_new(job->session->fence_context,
+                                         _mali_osk_atomic_inc_return(&job->session->fence_seqno));
+
+               if (NULL == job->rendered_dma_fence) {
+                       MALI_PRINT_ERROR(("Failed to create a new dma fence.\n"));
+                       ret = _MALI_OSK_ERR_FAULT;
+                       goto failed_to_create_dma_fence;
+               }
+
+               /* To avoid deadlock, take wait/wound mutex locks on all dma buffers */
+
+               error = mali_dma_fence_lock_reservation_object_list(reservation_object_list,
+                               num_reservation_object, &ww_actx);
+
+               if (0 != error) {
+                       MALI_PRINT_ERROR(("Failed to lock all reservation objects.\n"));
+                       ret = _MALI_OSK_ERR_FAULT;
+                       goto failed_to_lock_reservation_object_list;
+               }
+
+               mali_dma_fence_context_init(&job->dma_fence_context,
+                                           mali_timeline_dma_fence_callback, (void *)job);
+
+               /* Add dma fence waiters and dma fence callback. */
+               for (i = 0; i < num_reservation_object; i++) {
+                       ret = mali_dma_fence_context_add_waiters(&job->dma_fence_context, reservation_object_list[i]);
+                       if (_MALI_OSK_ERR_OK != ret) {
+                               MALI_PRINT_ERROR(("Failed to add waiter into mali dma fence context.\n"));
+                               goto failed_to_add_dma_fence_waiter;
+                       }
+               }
+
+               for (i = 0; i < num_reservation_object; i++) {
+                       reservation_object_add_excl_fence(reservation_object_list[i], job->rendered_dma_fence);
+               }
+
+               num_dma_fence_waiter = job->dma_fence_context.num_dma_fence_waiter;
+
+               /* Add job to Timeline system. */
+               (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+                               mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+               if (0 != num_dma_fence_waiter) {
+                       mali_dma_fence_context_dec_count(&job->dma_fence_context);
+               }
+
+               /* Unlock all wait/wound mutex lock. */
+               mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+                               num_reservation_object, &ww_actx);
+       } else {
+               /* Add job to Timeline system. */
+               (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+                               mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+       }
+
+       kfree(reservation_object_list);
+       return ret;
+#else
+       /* Add job to Timeline system. */
+       (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+                       mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+#endif
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+failed_to_add_dma_fence_waiter:
+       mali_dma_fence_context_term(&job->dma_fence_context);
+       mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+                       num_reservation_object, &ww_actx);
+failed_to_lock_reservation_object_list:
+       mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+failed_to_create_dma_fence:
+failed_to_find_mem_backend:
+       if (NULL != reservation_object_list)
+               kfree(reservation_object_list);
+failed_to_alloc_reservation_object_list:
+       mali_pp_job_fb_lookup_remove(job);
+#endif
+       return ret;
+}
+
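+/*
+ * Illustrative sketch (not part of the driver) of the deadlock-avoidance
+ * scheme used above: all reservation objects are locked through one
+ * wait/wound acquire context before any exclusive fence is attached, so
+ * two jobs sharing dma buffers can never lock them in conflicting order.
+ * Assuming ww_actx is a kernel ww_acquire_ctx, the shape is:
+ *
+ *     if (0 != mali_dma_fence_lock_reservation_object_list(objs, n, &ww_actx))
+ *             return _MALI_OSK_ERR_FAULT;          // nothing is left locked
+ *     for (i = 0; i < n; i++)
+ *             reservation_object_add_excl_fence(objs[i], fence);
+ *     mali_dma_fence_unlock_reservation_object_list(objs, n, &ww_actx);
+ */
+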
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+       struct mali_session_data *session;
+       _mali_osk_list_t *queue;
+
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_gp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (unlikely(session->is_aborting)) {
+               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+                                    mali_gp_job_get_id(job), job));
+               return MALI_FALSE; /* job not queued */
+       }
+
+       mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+       /* Determine which queue the job should be added to. */
+       if (session->use_high_priority_job_queue) {
+               queue = &job_queue_gp.high_pri;
+       } else {
+               queue = &job_queue_gp.normal_pri;
+       }
+
+       job_queue_gp.depth += 1;
+       job_queue_gp.big_job_num += (job->big_job) ? 1 : 0;
+
+       /* Add job to queue (mali_gp_job_list_add finds the correct place). */
+       mali_gp_job_list_add(job, queue);
+
+       /*
+        * We hold a PM reference for every job we hold queued (and running).
+        * It is important that we take this reference after the job has been
+        * added to the queue, so that any runtime resume can schedule this
+        * job right there and then.
+        */
+       _mali_osk_pm_dev_ref_get_async();
+
+       if (mali_utilization_enabled()) {
+               /*
+                * We cheat a little bit by counting the GP as busy from the
+                * time a GP job is queued. This is fine because we only
+                * lose the tiny idle gap between jobs, and in return we get
+                * less utilization work to do (fewer locks taken).
+                */
+               mali_utilization_gp_start();
+       }
+
+       mali_pm_record_gpu_active(MALI_TRUE);
+
+       /* Add profiling events for job enqueued */
+       _mali_osk_profiling_add_event(
+               MALI_PROFILING_EVENT_TYPE_SINGLE |
+               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+               mali_gp_job_get_pid(job),
+               mali_gp_job_get_tid(job),
+               mali_gp_job_get_frame_builder_id(job),
+               mali_gp_job_get_flush_id(job),
+               0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+                             mali_gp_job_get_id(job), "GP");
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+                            mali_gp_job_get_id(job), job));
+
+       return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+       struct mali_session_data *session;
+       _mali_osk_list_t *queue = NULL;
+
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_pp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (unlikely(session->is_aborting)) {
+               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+                                    mali_pp_job_get_id(job), job));
+               return MALI_FALSE; /* job not queued */
+       } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) {
+               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n",
+                                    mali_pp_job_get_id(job), job));
+               return MALI_FALSE;
+       }
+
+       mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+       if (session->use_high_priority_job_queue) {
+               queue = &job_queue_pp.high_pri;
+       } else {
+               queue = &job_queue_pp.normal_pri;
+       }
+
+       job_queue_pp.depth +=
+               mali_pp_job_get_sub_job_count(job);
+
+       /* Add job to queue (mali_pp_job_list_add finds the correct place). */
+       mali_pp_job_list_add(job, queue);
+
+       /*
+        * We hold a PM reference for every job we hold queued (and running).
+        * It is important that we take this reference after the job has been
+        * added to the queue, so that any runtime resume can schedule this
+        * job right there and then.
+        */
+       _mali_osk_pm_dev_ref_get_async();
+
+       if (mali_utilization_enabled()) {
+               /*
+                * We cheat a little bit by counting the PP as busy from the
+                * time a PP job is queued. This is fine because we only
+                * lose the tiny idle gap between jobs, and in return we get
+                * less utilization work to do (fewer locks taken).
+                */
+               mali_utilization_pp_start();
+       }
+
+       mali_pm_record_gpu_active(MALI_FALSE);
+
+       /* Add profiling events for job enqueued */
+       _mali_osk_profiling_add_event(
+               MALI_PROFILING_EVENT_TYPE_SINGLE |
+               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+               MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+               mali_pp_job_get_pid(job),
+               mali_pp_job_get_tid(job),
+               mali_pp_job_get_frame_builder_id(job),
+               mali_pp_job_get_flush_id(job),
+               0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+                             mali_pp_job_get_id(job), "PP");
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+                            mali_pp_job_is_virtual(job)
+                            ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job), job,
+                            mali_pp_job_get_sub_job_count(job)));
+
+       return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+               mali_bool success)
+{
+       _mali_uk_gp_job_finished_s *jobres;
+       struct mali_session_data *session;
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_gp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       notification = mali_gp_job_get_finished_notification(job);
+       MALI_DEBUG_ASSERT_POINTER(notification);
+
+       jobres = notification->result_buffer;
+       MALI_DEBUG_ASSERT_POINTER(jobres);
+
+       jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count();
+
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       if (MALI_TRUE == success) {
+               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+       } else {
+               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+       }
+       jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+       jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+       jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+       mali_session_send_notification(session, notification);
+}
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+               u32 num_cores_in_virtual)
+{
+       u32 i;
+       u32 num_counters_to_copy;
+       _mali_uk_pp_job_finished_s *jobres;
+       struct mali_session_data *session;
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+               return;
+       }
+
+       session = mali_pp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       notification = mali_pp_job_get_finished_notification(job);
+       MALI_DEBUG_ASSERT_POINTER(notification);
+
+       jobres = notification->result_buffer;
+       MALI_DEBUG_ASSERT_POINTER(jobres);
+
+       jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+       if (MALI_TRUE == mali_pp_job_was_success(job)) {
+               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+       } else {
+               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+       }
+
+       if (mali_pp_job_is_virtual(job)) {
+               num_counters_to_copy = num_cores_in_virtual;
+       } else {
+               num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+       }
+
+       /* The counter sources are global, so set them once rather than per iteration. */
+       jobres->perf_counter_src0 =
+               mali_pp_job_get_pp_counter_global_src0();
+       jobres->perf_counter_src1 =
+               mali_pp_job_get_pp_counter_global_src1();
+
+       for (i = 0; i < num_counters_to_copy; i++) {
+               jobres->perf_counter0[i] =
+                       mali_pp_job_get_perf_counter_value0(job, i);
+               jobres->perf_counter1[i] =
+                       mali_pp_job_get_perf_counter_value1(job, i);
+       }
+
+       mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+       mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+       _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+void mali_scheduler_do_pp_job_delete(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+
+       MALI_IGNORE(arg);
+
+       /*
+        * Quickly "unhook" the jobs pending to be deleted, so we can release
+        * the lock before we start deleting the job objects
+        * (without any locks held)
+        */
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+       _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+               _mali_osk_list_delinit(&job->list);
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+               mali_dma_fence_context_term(&job->dma_fence_context);
+#endif
+
+               mali_pp_job_delete(job); /* delete the job object itself */
+       }
+}
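+
+/*
+ * Illustrative sketch (not part of the driver) of the "unhook" pattern used
+ * by both deferred-work handlers in this file: splice the shared queue onto
+ * a private list head while the spinlock is held, then do the heavy work on
+ * the private copy with no locks held:
+ *
+ *     _MALI_OSK_LIST_HEAD_STATIC_INIT(local);
+ *     _mali_osk_spinlock_irq_lock(lock);
+ *     _mali_osk_list_move_list(&shared_queue, &local);    // O(1) splice
+ *     _mali_osk_spinlock_irq_unlock(lock);
+ *     _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &local,
+ *                                 struct mali_pp_job, list)
+ *             ...                                         // lock-free work
+ */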
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+       mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+       _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_IGNORE(arg);
+
+       /*
+        * Quickly "unhook" the jobs pending to be queued, so we can release
+        * the lock before we start queueing the job objects
+        * (without any locks held)
+        */
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+       _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+       /* First loop through all jobs and do the pre-work (no locks needed) */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_needs_dma_buf_mapping(job)) {
+                       /*
+                        * This operation could fail, but we continue anyway,
+                        * because the worst that could happen is that this
+                        * job will fail due to a Mali page fault.
+                        */
+                       mali_dma_buf_map_job(job);
+               }
+       }
+
+       mali_scheduler_lock();
+
+       /* Then loop through all jobs again to queue them (lock needed) */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+
+               /* Remove from scheduler_pp_job_queue_list before queueing */
+               mali_pp_job_list_remove(job);
+
+               if (mali_scheduler_queue_pp_job(job)) {
+                       /* Job queued successfully */
+                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
+               } else {
+                       /* Failed to enqueue job, release job (with error) */
+                       mali_pp_job_fb_lookup_remove(job);
+                       mali_pp_job_mark_unstarted_failed(job);
+
+                       /* unlock scheduler in this uncommon case */
+                       mali_scheduler_unlock();
+
+                       schedule_mask |= mali_timeline_tracker_release(
+                                                mali_pp_job_get_tracker(job));
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+                                                      MALI_FALSE);
+
+                       mali_scheduler_lock();
+               }
+       }
+
+       mali_scheduler_unlock();
+
+       /* Trigger scheduling of jobs */
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+void mali_scheduler_gp_pp_job_queue_print(void)
+{
+       struct mali_gp_job *gp_job = NULL;
+       struct mali_gp_job *tmp_gp_job = NULL;
+       struct mali_pp_job *pp_job = NULL;
+       struct mali_pp_job *tmp_pp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+       /* Dump job queue status. */
+       if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
+               MALI_PRINT(("No GP or PP jobs in the job queue.\n"));
+               return;
+       }
+
+       MALI_PRINT(("Total (%d) GP jobs in the job queue.\n", job_queue_gp.depth));
+       if (job_queue_gp.depth > 0) {
+               if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
+                                                   struct mali_gp_job, list) {
+                               MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+                       }
+               }
+
+               if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri,
+                                                   struct mali_gp_job, list) {
+                               MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+                       }
+               }
+       }
+
+       MALI_PRINT(("Total (%d) PP jobs in the job queue.\n", job_queue_pp.depth));
+       if (job_queue_pp.depth > 0) {
+               if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri,
+                                                   struct mali_pp_job, list) {
+                               if (mali_pp_job_is_virtual(pp_job)) {
+                                       MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               } else {
+                                       MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               }
+                       }
+               }
+
+               if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri,
+                                                   struct mali_pp_job, list) {
+                               if (mali_pp_job_is_virtual(pp_job)) {
+                                       MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               } else {
+                                       MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               }
+                       }
+               }
+       }
+
+       /* Dump group running job status. */
+       mali_executor_running_status_print();
+}
diff --git a/utgard/r8p0/common/mali_scheduler.h b/utgard/r8p0/common/mali_scheduler.h
new file mode 100755 (executable)
index 0000000..8a7ebc9
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler_types.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+       _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+       _MALI_OSK_LIST_HEAD(high_pri);   /* Queued jobs with high priority */
+       u32 depth;                       /* Depth of combined queues. */
+       u32 big_job_num;                 /* Number of queued big GP jobs. */
+};
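+
+/*
+ * Accounting sketch (mirrors the queueing code in mali_scheduler.c): a GP
+ * job contributes 1 to depth, while a PP job contributes one unit per
+ * sub job, so depth counts schedulable work items rather than jobs:
+ *
+ *     job_queue_gp.depth += 1;
+ *     job_queue_pp.depth += mali_pp_job_get_sub_job_count(job);
+ */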
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+       MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
+
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+       _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
+{
+       return job_queue_gp.depth;
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void)
+{
+       return job_queue_gp.big_job_num;
+}
+
+u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+struct mali_pp_job *mali_scheduler_job_pp_next(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+       return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
+{
+       return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+}
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+                                   mali_bool success,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued);
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+                                   u32 num_cores_in_virtual,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued);
+
+void mali_scheduler_abort_session(struct mali_session_data *session);
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+               u32 num_cores_in_virtual);
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
+
+void mali_scheduler_gp_pp_job_queue_print(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/utgard/r8p0/common/mali_scheduler_types.h b/utgard/r8p0/common/mali_scheduler_types.h
new file mode 100755 (executable)
index 0000000..0819c59
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
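+
+/*
+ * Usage sketch (mirrors mali_scheduler_do_pp_job_queue()): callers
+ * accumulate a mask while queueing and kick the executor once at the end:
+ *
+ *     mali_scheduler_mask mask = MALI_SCHEDULER_MASK_EMPTY;
+ *     if (mali_scheduler_queue_pp_job(job))
+ *             mask |= MALI_SCHEDULER_MASK_PP;
+ *     mali_executor_schedule_from_mask(mask, MALI_FALSE);
+ */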
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/utgard/r8p0/common/mali_session.c b/utgard/r8p0/common/mali_session.c
new file mode 100755 (executable)
index 0000000..95819f6
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+#include "mali_ukk.h"
+#ifdef MALI_MEM_SWAP_TRACKING
+#include "mali_memory_swap_alloc.h"
+#endif
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
+wait_queue_head_t pending_queue;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+       _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+       /* init wait queue for big varying job */
+       init_waitqueue_head(&pending_queue);
+
+       mali_sessions_lock = _mali_osk_spinlock_irq_init(
+                                    _MALI_OSK_LOCKFLAG_ORDERED,
+                                    _MALI_OSK_LOCK_ORDER_SESSIONS);
+       if (NULL == mali_sessions_lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+       if (NULL != mali_sessions_lock) {
+               _mali_osk_spinlock_irq_term(mali_sessions_lock);
+               mali_sessions_lock = NULL;
+       }
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_add(&session->link, &mali_sessions);
+       mali_session_count++;
+       mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+       mali_session_lock();
+       _mali_osk_list_delinit(&session->link);
+       mali_session_count--;
+       mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+       return mali_session_count;
+}
+
+mali_bool mali_session_pp_job_is_empty(void *data)
+{
+       struct mali_session_data *session = (struct mali_session_data *)data;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 == _mali_osk_atomic_read(&session->number_of_pp_jobs)) {
+               return MALI_TRUE;
+       }
+       return MALI_FALSE;
+}
+
+wait_queue_head_t *mali_session_get_wait_queue(void)
+{
+       return &pending_queue;
+}
+
+/*
+ * Get the max number of completed window jobs across all active sessions;
+ * used when calculating window-render frames per second.
+ */
+#if defined(CONFIG_MALI_DVFS)
+u32 mali_session_max_window_num(void)
+{
+       struct mali_session_data *session, *tmp;
+       u32 max_window_num = 0;
+       u32 tmp_number = 0;
+
+       mali_session_lock();
+
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               tmp_number = _mali_osk_atomic_xchg(
+                                    &session->number_of_window_jobs, 0);
+               if (max_window_num < tmp_number) {
+                       max_window_num = tmp_number;
+               }
+       }
+
+       mali_session_unlock();
+
+       return max_window_num;
+}
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+       struct mali_session_data *session, *tmp;
+       char task_comm[TASK_COMM_LEN];
+       char task_default[] = "not found";
+       struct task_struct *ttask;
+       u32 mali_mem_usage;
+       u32 total_mali_mem_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+       u32 swap_pool_size;
+       u32 swap_unlock_size;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(print_ctx);
+       mali_session_lock();
+
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               ttask = pid_task(find_vpid(session->pid), PIDTYPE_PID);
+               if (ttask)
+                       strncpy(task_comm, ttask->comm, sizeof(ttask->comm));
+               else
+                       strncpy(task_comm, task_default, sizeof(task_default));
+#ifdef MALI_MEM_SWAP_TRACKING
+               _mali_osk_ctxprintf(print_ctx, "  %-25s  %-10u %-25s %-10u  %-15u  %-15u  %-10u  %-10u %-10u\n",
+                                   session->comm, session->pid, task_comm,
+                                   (unsigned int)((atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)session->max_mali_mem_allocated_size,
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE)
+                                  );
+#else
+               _mali_osk_ctxprintf(print_ctx, "  %-25s  %-10u %-25s %-10u  %-15u  %-15u  %-10u  %-10u\n",
+                                   session->comm, session->pid, task_comm,
+                                   (unsigned int)((atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)session->max_mali_mem_allocated_size,
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+                                   (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE)
+                                  );
+
+#endif
+       }
+       mali_session_unlock();
+       mali_mem_usage  = _mali_ukk_report_memory_usage();
+       total_mali_mem_size = _mali_ukk_report_total_memory_size();
+       _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+#ifdef MALI_MEM_SWAP_TRACKING
+       mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size);
+       _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", swap_pool_size, swap_unlock_size);
+#endif
+}
diff --git a/utgard/r8p0/common/mali_session.h b/utgard/r8p0/common/mali_session.h
new file mode 100755 (executable)
index 0000000..f669a31
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_memory_types.h"
+#include "mali_memory_manager.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+/* Max number of pending big jobs allowed in the kernel. */
+#define MALI_MAX_PENDING_BIG_JOB (2)
+
+struct mali_session_data {
+       _mali_osk_notification_queue_t *ioctl_queue;
+
+       _mali_osk_wait_queue_t *wait_queue; /**< Wait queue used to wait for the session's PP job count to reach 0. */
+
+       _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+       _mali_osk_mutex_t *cow_lock; /**< Lock protecting COW memory free manipulation. */
+#if 0
+       _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+#endif
+       struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+       _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+       _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI_DVFS)
+       _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+       _mali_osk_atomic_t number_of_pp_jobs; /**< The number of PP jobs within this session. */
+
+       _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id.  Used to link jobs from same frame builder. */
+       struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+       struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+       mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+       mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+       u32 pid;
+       char *comm;
+       atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */
+       atomic_t mali_mem_allocated_pages; /**< The number of currently allocated Mali memory pages, covering both Mali OS memory and Mali dedicated memory. */
+       size_t max_mali_mem_allocated_size; /**< The historical maximum of allocated Mali memory, covering both Mali OS memory and Mali dedicated memory. */
+       /* Added for the new memory system */
+       struct mali_allocation_manager allocation_mgr;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       u32 fence_context;      /**< The dma fence context that this session's fences execute on. */
+       _mali_osk_atomic_t fence_seqno; /**< A linearly increasing sequence number for this dma fence context. */
+#endif
+};
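+
+/*
+ * Sketch of how the two dma fence fields above are consumed (taken from
+ * the PP job submission path in mali_scheduler.c): each new fence pairs
+ * the per-session context with the next sequence number:
+ *
+ *     fence = mali_dma_fence_new(session->fence_context,
+ *                     _mali_osk_atomic_inc_return(&session->fence_seqno));
+ */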
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+mali_bool mali_session_pp_job_is_empty(void *data);
+wait_queue_head_t *mali_session_get_wait_queue(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+       _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
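+
+/*
+ * Usage sketch (mirrors mali_session_max_window_num() in mali_session.c);
+ * the sessions lock must be held across the walk:
+ *
+ *     struct mali_session_data *session, *tmp;
+ *
+ *     mali_session_lock();
+ *     MALI_SESSION_FOREACH(session, tmp, link) {
+ *             // inspect session
+ *     }
+ *     mali_session_unlock();
+ */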
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+       return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_mutex_signal(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+       _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
+/*
+ * Get the max number of completed window jobs across all active sessions;
+ * used when calculating window-render frames per second.
+ */
+u32 mali_session_max_window_num(void);
+
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
+#endif /* __MALI_SESSION_H__ */
diff --git a/utgard/r8p0/common/mali_soft_job.c b/utgard/r8p0/common/mali_soft_job.c
new file mode 100755 (executable)
index 0000000..739ec78
--- /dev/null
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       _mali_osk_spinlock_irq_lock(system->lock);
+       MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+       MALI_DEBUG_ASSERT(0 == system->lock_owner);
+       MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+       MALI_DEBUG_CODE(system->lock_owner = 0);
+       _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+       struct mali_soft_job_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+       if (NULL == system) {
+               return NULL;
+       }
+
+       system->session = session;
+
+       system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == system->lock) {
+               mali_soft_job_system_destroy(system);
+               return NULL;
+       }
+       system->lock_owner = 0;
+       system->last_job_id = 0;
+
+       _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+       return system;
+}
+
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       /* All jobs should be free at this point. */
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
+
+       if (NULL != system) {
+               if (NULL != system->lock) {
+                       _mali_osk_spinlock_irq_term(system->lock);
+               }
+               _mali_osk_free(system);
+       }
+}
+
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_soft_job_system_lock(job->system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+       MALI_DEBUG_ASSERT(system == job->system);
+
+       _mali_osk_list_del(&(job->system_list));
+
+       mali_soft_job_system_unlock(job->system);
+
+       _mali_osk_free(job);
+}
+
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+       struct mali_soft_job *job, *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               if (job->id == job_id)
+                       return job;
+       }
+
+       return NULL;
+}
+
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+       if (NULL != job) {
+               if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+               _mali_osk_atomic_term(&job->refcount);
+
+               if (NULL != job->activated_notification) {
+                       _mali_osk_notification_delete(job->activated_notification);
+                       job->activated_notification = NULL;
+               }
+
+               mali_soft_job_system_free_job(job->system, job);
+       }
+}
+
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
+{
+       struct mali_soft_job *job;
+       _mali_osk_notification_t *notification = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+                         (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
+
+       notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+       if (unlikely(NULL == notification)) {
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+               return NULL;
+       }
+
+       job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+       if (unlikely(NULL == job)) {
+               MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed.\n"));
+               _mali_osk_notification_delete(notification); /* don't leak the pre-allocated notification */
+               return NULL;
+       }
+
+       mali_soft_job_system_lock(system);
+
+       job->system = system;
+       job->id = system->last_job_id++;
+       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+       _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
+       job->type = type;
+       job->user_job = user_job;
+       job->activated = MALI_FALSE;
+
+       job->activated_notification = notification;
+
+       _mali_osk_atomic_init(&job->refcount, 1);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+       MALI_DEBUG_ASSERT(system == job->system);
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+       mali_soft_job_system_unlock(system);
+
+       return job;
+}
+
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+       mali_timeline_point point;
+       struct mali_soft_job_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       system = job->system;
+
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+       mali_soft_job_system_lock(system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+       job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+       mali_soft_job_system_unlock(system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+       mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+       point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+       return point;
+}
+
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+       struct mali_soft_job *job;
+
+       job = (struct mali_soft_job *) data;
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       return job->activated;
+}
+
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+       struct mali_soft_job *job;
+       struct mali_timeline_system *timeline_system;
+       mali_scheduler_mask schedule_mask;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_soft_job_system_lock(system);
+
+       job = mali_soft_job_system_lookup_job(system, job_id);
+
+       if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+           || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+               mali_soft_job_system_unlock(system);
+               MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+               mali_soft_job_system_unlock(system);
+
+               MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+               MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+               mali_soft_job_destroy(job);
+
+               return _MALI_OSK_ERR_TIMEOUT;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+       job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+       mali_soft_job_system_unlock(system);
+
+       /* Since the job now is in signaled state, timeouts from the timeline system will be
+        * ignored, and it is not possible to signal this job again. */
+
+       timeline_system = system->session->timeline_system;
+       MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+       /* Wait until activated. */
+       _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+       schedule_mask = mali_timeline_tracker_release(&job->tracker);
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+       mali_soft_job_destroy(job);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+       if (NULL != job->activated_notification) {
+               _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+               res->user_job = job->user_job;
+               mali_session_send_notification(job->system->session, job->activated_notification);
+       }
+       job->activated_notification = NULL;
+}
+
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+       mali_soft_job_system_lock(job->system);
+
+       if (unlikely(job->system->session->is_aborting)) {
+               MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+               mali_soft_job_system_unlock(job->system);
+
+               /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+               mali_timeline_tracker_release(&job->tracker);
+               mali_soft_job_destroy(job);
+               return schedule_mask;
+       }
+
+       /* Send activated notification. */
+       mali_soft_job_send_activated_notification(job);
+
+       /* Wake up sleeping signaler. */
+       job->activated = MALI_TRUE;
+
+       /* If the job type is self-signaled: release the tracker, destroy the job, and schedule at once. */
+       if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+               mali_soft_job_system_unlock(job->system);
+
+               schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+               mali_soft_job_destroy(job);
+       } else {
+               _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+               mali_soft_job_system_unlock(job->system);
+       }
+
+       return schedule_mask;
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->system);
+       MALI_DEBUG_ASSERT_POINTER(job->system->session);
+       MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+       MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+       mali_soft_job_system_lock(job->system);
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED  == job->state ||
+                         MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+       if (unlikely(job->system->session->is_aborting)) {
+               /* The session is aborting.  This job will be released and destroyed by @ref
+                * mali_soft_job_system_abort(). */
+               mali_soft_job_system_unlock(job->system);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+               /* The job is about to be signaled, ignore timeout. */
+               MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+               mali_soft_job_system_unlock(job->system);
+               return schedule_mask;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+       job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+       _mali_osk_atomic_inc(&job->refcount);
+
+       mali_soft_job_system_unlock(job->system);
+
+       schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+       mali_soft_job_destroy(job);
+
+       return schedule_mask;
+}
+
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+       struct mali_soft_job *job, *tmp;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+       mali_soft_job_system_lock(system);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED   == job->state ||
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+               if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+                       /* If the job has been activated, we have to release the tracker and destroy
+                        * the job.  If not, the tracker will be released and the job destroyed when
+                        * it is activated. */
+                       if (MALI_TRUE == job->activated) {
+                               MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+                               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+                               _mali_osk_list_move(&job->system_list, &jobs);
+                       }
+               } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+                       MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+                       /* We need to destroy this soft job. */
+                       _mali_osk_list_move(&job->system_list, &jobs);
+               }
+       }
+
+       mali_soft_job_system_unlock(system);
+
+       /* Release and destroy jobs. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED  == job->state ||
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+               if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+                       mali_timeline_tracker_release(&job->tracker);
+               }
+
+               /* Move job back to used list before destroying. */
+               _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+               mali_soft_job_destroy(job);
+       }
+
+       mali_executor_schedule_from_mask(MALI_SCHEDULER_MASK_ALL, MALI_FALSE);
+}
diff --git a/utgard/r8p0/common/mali_soft_job.h b/utgard/r8p0/common/mali_soft_job.h
new file mode 100755 (executable)
index 0000000..4dd0589
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if they
+ * are either signaled by user-space (@ref mali_soft_job_system_signal_job) or timed out by the
+ * Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release their job resources automatically
+ * in the kernel when the job is activated.
+ */
+typedef enum mali_soft_job_type {
+       MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
+       MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * A soft job is first allocated (@ref mali_soft_job_create) with its state set to
+ * MALI_SOFT_JOB_STATE_ALLOCATED.  Once the job is added to the Timeline system
+ * (@ref mali_soft_job_start), the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT.  This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ */
+typedef enum mali_soft_job_state {
+       MALI_SOFT_JOB_STATE_ALLOCATED,
+       MALI_SOFT_JOB_STATE_STARTED,
+       MALI_SOFT_JOB_STATE_SIGNALED,
+       MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/**
+ * Soft job struct.
+ *
+ * Soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+       mali_soft_job_type            type;                   /**< Soft job type.  Must be one of MALI_SOFT_JOB_TYPE_*. */
+       u64                           user_job;               /**< Identifier for soft job in user space. */
+       _mali_osk_atomic_t            refcount;               /**< Soft jobs are reference counted to prevent premature deletion. */
+       struct mali_timeline_tracker  tracker;                /**< Timeline tracker for soft job. */
+       mali_bool                     activated;              /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+       _mali_osk_notification_t     *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+       /* Protected by soft job system lock. */
+       u32                           id;                     /**< Used by user-space to find corresponding soft job in kernel-space. */
+       mali_soft_job_state           state;                  /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+       struct mali_soft_job_system  *system;                 /**< The soft job system this job is in. */
+       _mali_osk_list_t              system_list;            /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belongs to a session.
+ */
+typedef struct mali_soft_job_system {
+       struct mali_session_data *session;                    /**< The session this soft job system belongs to. */
+       _MALI_OSK_LIST_HEAD(jobs_used);                       /**< List of all allocated soft jobs. */
+
+       _mali_osk_spinlock_irq_t *lock;                       /**< Lock used to protect soft job system and its soft jobs. */
+       u32 lock_owner;                                       /**< Contains tid of thread that locked the system or 0, if not locked. */
+       u32 last_job_id;                                      /**< Records the last job id; protected by the lock. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job system must not have any started or activated jobs.  Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
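+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the typical
+ * lifecycle of a user-signaled soft job, assuming a valid soft job system
+ * and an already built timeline fence:
+ *
+ *     struct mali_soft_job *job;
+ *     mali_timeline_point point;
+ *
+ *     job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_USER_SIGNALED, user_job);
+ *     if (NULL != job) {
+ *             u32 job_id = job->id;
+ *             point = mali_soft_job_start(job, &fence);
+ *             ... later, once user-space reports completion ...
+ *             mali_soft_job_system_signal_job(system, job_id);
+ *     }
+ */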
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff --git a/utgard/r8p0/common/mali_spinlock_reentrant.c b/utgard/r8p0/common/mali_spinlock_reentrant.c
new file mode 100755 (executable)
index 0000000..28e2e17
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+       struct mali_spinlock_reentrant *spinlock;
+
+       spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+       if (NULL == spinlock) {
+               return NULL;
+       }
+
+       spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+       if (NULL == spinlock->lock) {
+               mali_spinlock_reentrant_term(spinlock);
+               return NULL;
+       }
+
+       return spinlock;
+}
+
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+       if (NULL != spinlock->lock) {
+               _mali_osk_spinlock_irq_term(spinlock->lock);
+       }
+
+       _mali_osk_free(spinlock);
+}
+
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+       MALI_DEBUG_ASSERT(0 != tid);
+
+       MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+       if (tid != spinlock->owner) {
+               _mali_osk_spinlock_irq_lock(spinlock->lock);
+               MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+               spinlock->owner = tid;
+       }
+
+       MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+       ++spinlock->counter;
+}
+
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock);
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+       MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+       --spinlock->counter;
+       if (0 == spinlock->counter) {
+               spinlock->owner = 0;
+               MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+               _mali_osk_spinlock_irq_unlock(spinlock->lock);
+       }
+}
diff --git a/utgard/r8p0/common/mali_spinlock_reentrant.h b/utgard/r8p0/common/mali_spinlock_reentrant.h
new file mode 100755 (executable)
index 0000000..793875a
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+       _mali_osk_spinlock_irq_t *lock;
+       u32               owner;
+       u32               counter;
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+       MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+       return (tid == spinlock->owner && 0 < spinlock->counter);
+}
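+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the lock may be
+ * taken recursively by the same thread.  Every wait must be paired with a
+ * signal using the same tid, and the underlying spinlock is only released
+ * when the outermost signal brings the counter back to zero:
+ *
+ *     u32 tid = _mali_osk_get_tid();
+ *
+ *     mali_spinlock_reentrant_wait(spinlock, tid);
+ *     mali_spinlock_reentrant_wait(spinlock, tid);    (nested, no deadlock)
+ *     MALI_DEBUG_ASSERT(mali_spinlock_reentrant_is_held(spinlock, tid));
+ *     mali_spinlock_reentrant_signal(spinlock, tid);
+ *     mali_spinlock_reentrant_signal(spinlock, tid);  (lock released here)
+ */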
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff --git a/utgard/r8p0/common/mali_timeline.c b/utgard/r8p0/common/mali_timeline.c
new file mode 100755 (executable)
index 0000000..4ed5f86
--- /dev/null
@@ -0,0 +1,1911 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/file.h>
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+_mali_osk_wq_work_t *sync_fence_callback_work_t = NULL;
+_mali_osk_spinlock_irq_t *sync_fence_callback_list_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(sync_fence_callback_queue);
+#endif
+
+/*
+ * The following three counters record how many GP, physical PP and
+ * virtual PP jobs are delayed in the whole timeline system.  They are
+ * used to decide whether idle groups need to be deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+               struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+struct mali_deferred_fence_put_entry {
+       struct hlist_node list;
+       struct sync_fence *fence;
+};
+
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
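+
+/*
+ * On kernels older than 3.5 (the version gate above), dropping the last
+ * reference to a sync fence could end in fput(), which was not safe from
+ * atomic context; fences are therefore queued on the list above and put
+ * from the delayed work below instead.
+ */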
+
+static void put_sync_fences(struct work_struct *ignore)
+{
+       struct hlist_head list;
+       struct hlist_node *tmp, *pos;
+       unsigned long flags;
+       struct mali_deferred_fence_put_entry *o;
+
+       spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+       hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+       spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+       hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
+               sync_fence_put(o->fence);
+               kfree(o);
+       }
+}
+
+static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+/* Callback invoked when a sync fence that a tracker is waiting on is signaled.  The callback can
+ * run in atomic context, so the tracker is only queued here and the actual processing is deferred
+ * to the workqueue. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+#else
+static void mali_timeline_sync_fence_callback(struct mali_internal_sync_fence *sync_fence, struct mali_internal_sync_fence_waiter *sync_fence_waiter)
+#endif
+{
+       struct mali_timeline_tracker *tracker;
+
+       MALI_IGNORE(sync_fence);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+       tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       _mali_osk_spinlock_irq_lock(sync_fence_callback_list_lock);
+       _mali_osk_list_addtail(&tracker->sync_fence_signal_list, &sync_fence_callback_queue);
+       _mali_osk_spinlock_irq_unlock(sync_fence_callback_list_lock);
+
+       _mali_osk_wq_schedule_work(sync_fence_callback_work_t);
+}
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+       return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
+static void mali_timeline_timer_callback(void *data)
+{
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *tracker;
+       struct mali_timeline *timeline;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+
+       timeline = (struct mali_timeline *) data;
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       if (!system->timer_enabled) {
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+               return;
+       }
+
+       tracker = timeline->tracker_tail;
+       timeline->timer_active = MALI_FALSE;
+
+       if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+               /* This is likely delayed work that was scheduled out before it could be cancelled. */
+               if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+                       mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       return;
+               }
+
+               schedule_mask = mali_timeline_tracker_time_out(tracker);
+               tracker->timer_active = MALI_FALSE;
+       } else {
+               MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+       u32 i;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+       system->timer_enabled = MALI_FALSE;
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (NULL != timeline->delayed_work) {
+                       _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+                       timeline->timer_active = MALI_FALSE;
+               }
+       }
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       if (NULL != timeline) {
+               /* Assert that the timeline object has been properly cleaned up before destroying it. */
+               MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+               MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+               MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+               MALI_DEBUG_ASSERT(NULL != timeline->system);
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+               if (NULL != timeline->delayed_work) {
+                       _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+                       _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+               }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+               if (NULL != timeline->sync_tl) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+                       sync_timeline_destroy(timeline->sync_tl);
+#else
+                       mali_internal_sync_timeline_destroy(timeline->sync_tl);
+#endif
+               }
+#else
+               _mali_osk_free(timeline);
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+       }
+}
+
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+       struct mali_timeline *timeline;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+       timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+       if (NULL == timeline) {
+               return NULL;
+       }
+
+       /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+       /* Start the timeline a bit before wrapping when debugging. */
+       timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+       timeline->point_next = 1;
+#endif
+       timeline->point_oldest = timeline->point_next;
+
+       /* The tracker and waiter lists will initially be empty. */
+
+       timeline->system = system;
+       timeline->id = id;
+
+       timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+       if (NULL == timeline->delayed_work) {
+               mali_timeline_destroy(timeline);
+               return NULL;
+       }
+
+       timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       {
+               char timeline_name[32];
+
+               switch (id) {
+               case MALI_TIMELINE_GP:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+                       break;
+               case MALI_TIMELINE_PP:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+                       break;
+               case MALI_TIMELINE_SOFT:
+                       _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+                       break;
+               default:
+                       MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
+
+               timeline->destroyed = MALI_FALSE;
+
+               timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
+               if (NULL == timeline->sync_tl) {
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
+
+               timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+               if (NULL == timeline->spinlock) {
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       return timeline;
+}
+
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       if (mali_timeline_is_full(timeline)) {
+               /* Don't add tracker if timeline is full. */
+               tracker->point = MALI_TIMELINE_NO_POINT;
+               return;
+       }
+
+       tracker->timeline = timeline;
+       tracker->point    = timeline->point_next;
+
+       /* Find next available point. */
+       timeline->point_next++;
+       if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+               timeline->point_next++;
+       }
+
+       MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+       if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+               _mali_osk_atomic_inc(&gp_tracker_count);
+       } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+               if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+                       _mali_osk_atomic_inc(&virt_pp_tracker_count);
+               } else {
+                       _mali_osk_atomic_inc(&phy_pp_tracker_count);
+               }
+       }
+
+       /* Add tracker as new head on timeline's tracker list. */
+       if (NULL == timeline->tracker_head) {
+               /* Tracker list is empty. */
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+               timeline->tracker_tail = tracker;
+
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+       } else {
+               MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+               tracker->timeline_prev = timeline->tracker_head;
+               timeline->tracker_head->timeline_next = tracker;
+
+               MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+       }
+       timeline->tracker_head = tracker;
+
+       MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+       MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
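+
+/*
+ * Note on list orientation, as used throughout this file: tracker_head is
+ * the newest tracker on a timeline and tracker_tail the oldest, with
+ * timeline_next pointing from older entries towards newer ones.
+ */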
+
+/* Inserting the waiter object into the given timeline */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+       struct mali_timeline_waiter *waiter_prev;
+       struct mali_timeline_waiter *waiter_next;
+
+       /* The waiter's point must lie between the timeline tail and head, and there
+        * must be fewer than MALI_TIMELINE_MAX_POINT_SPAN points between them */
+       MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+       MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+       /* Find out where to put this waiter, in the linked waiter list of the given timeline */
+       waiter_prev = timeline->waiter_head; /* Insert new after  waiter_prev */
+       waiter_next = NULL;                  /* Insert new before waiter_next */
+
+       /* Iterating backwards from head (newest) to tail (oldest) until we
+        * find the correct spot to insert the new waiter */
+       while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+               waiter_next = waiter_prev;
+               waiter_prev = waiter_prev->timeline_prev;
+       }
+
+       if (NULL == waiter_prev && NULL == waiter_next) {
+               /* list is empty */
+               timeline->waiter_head = waiter_new;
+               timeline->waiter_tail = waiter_new;
+       } else if (NULL == waiter_next) {
+               /* insert at head */
+               waiter_new->timeline_prev = timeline->waiter_head;
+               timeline->waiter_head->timeline_next = waiter_new;
+               timeline->waiter_head = waiter_new;
+       } else if (NULL == waiter_prev) {
+               /* insert at tail */
+               waiter_new->timeline_next = timeline->waiter_tail;
+               timeline->waiter_tail->timeline_prev = waiter_new;
+               timeline->waiter_tail = waiter_new;
+       } else {
+               /* insert between */
+               waiter_new->timeline_next = waiter_next;
+               waiter_new->timeline_prev = waiter_prev;
+               waiter_next->timeline_prev = waiter_new;
+               waiter_prev->timeline_next = waiter_new;
+       }
+}
+
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *oldest_tracker;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Timer is disabled, early out. */
+       if (!system->timer_enabled) return;
+
+       oldest_tracker = timeline->tracker_tail;
+       if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+               if (MALI_FALSE == oldest_tracker->timer_active) {
+                       if (MALI_TRUE == timeline->timer_active) {
+                               _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+                       }
+                       _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+                       oldest_tracker->timer_active = MALI_TRUE;
+                       timeline->timer_active = MALI_TRUE;
+               }
+       } else if (MALI_TRUE == timeline->timer_active) {
+               _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+               timeline->timer_active = MALI_FALSE;
+       }
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       MALI_DEBUG_CODE({
+               struct mali_timeline_system *system = timeline->system;
+               MALI_DEBUG_ASSERT_POINTER(system);
+
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+       });
+
+       if (NULL != timeline->tracker_tail) {
+               /* Set oldest point to oldest tracker's point */
+               timeline->point_oldest = timeline->tracker_tail->point;
+       } else {
+               /* No trackers, mark point list as empty */
+               timeline->point_oldest = timeline->point_next;
+       }
+
+       /* Release all waiters no longer on the timeline's point list.
+        * Releasing a waiter can trigger this function to be called again, so
+        * we do not store any pointers on the stack. */
+       while (NULL != timeline->waiter_tail) {
+               u32 waiter_time_relative;
+               u32 time_head_relative;
+               struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+               time_head_relative = timeline->point_next - timeline->point_oldest;
+               waiter_time_relative = waiter->point - timeline->point_oldest;
+
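+               /* Unsigned u32 subtraction keeps this comparison correct even
+                * when the timeline points wrap around. */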
+               if (waiter_time_relative < time_head_relative) {
+                       /* This and all following waiters are on the point list, so we are done. */
+                       break;
+               }
+
+               /* Remove waiter from timeline's waiter list. */
+               if (NULL != waiter->timeline_next) {
+                       waiter->timeline_next->timeline_prev = NULL;
+               } else {
+                       /* This was the last waiter */
+                       timeline->waiter_head = NULL;
+               }
+               timeline->waiter_tail = waiter->timeline_next;
+
+               /* Release waiter.  This could activate a tracker, if this was
+                * the last waiter for the tracker. */
+               schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+       }
+
+       return schedule_mask;
+}
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+                               mali_timeline_tracker_type type,
+                               struct mali_timeline_fence *fence,
+                               void *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+       /* Zero out all tracker members. */
+       _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+       tracker->type = type;
+       tracker->job = job;
+       tracker->trigger_ref_count = 1;  /* Prevents any callback from triggering while the tracker is being added */
+       tracker->os_tick_create = _mali_osk_time_tickcount();
+       MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+       tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+       /* Copy fence. */
+       if (NULL != fence) {
+               _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+       }
+}
+
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+       struct mali_timeline *timeline;
+       struct mali_timeline_system *system;
+       struct mali_timeline_tracker *tracker_next, *tracker_prev;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+
+       /* Upon entry a group lock will be held, but not a scheduler lock. */
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       /* Tracker should have been triggered */
+       MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+       /* All waiters should have been released at this point */
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+       MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+       timeline = tracker->timeline;
+       if (NULL == timeline) {
+               /* Tracker was not on a timeline, there is nothing to release. */
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       system = timeline->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Tracker should still be on timeline */
+       MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+       MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
+
+       /* Tracker is no longer valid. */
+       MALI_DEBUG_CODE(tracker->magic = 0);
+
+       tracker_next = tracker->timeline_next;
+       tracker_prev = tracker->timeline_prev;
+       tracker->timeline_next = NULL;
+       tracker->timeline_prev = NULL;
+
+       /* Removing tracker from timeline's tracker list */
+       if (NULL == tracker_next) {
+               /* This tracker was the head */
+               timeline->tracker_head = tracker_prev;
+       } else {
+               tracker_next->timeline_prev = tracker_prev;
+       }
+
+       if (NULL == tracker_prev) {
+               /* This tracker was the tail */
+               timeline->tracker_tail = tracker_next;
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+               /* Update the timeline's oldest time and release any waiters */
+               schedule_mask |= mali_timeline_update_oldest_point(timeline);
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+       } else {
+               tracker_prev->timeline_next = tracker_next;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Update delayed work only when it is the soft job timeline */
+       if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+               mali_timeline_update_delayed_work(tracker->timeline);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return schedule_mask;
+}
+
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+               struct mali_timeline_waiter *tail,
+               struct mali_timeline_waiter *head)
+{
+       struct mali_timeline_waiter    *waiter = NULL;
+       struct mali_timeline_waiter    *next = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(head);
+       MALI_DEBUG_ASSERT_POINTER(tail);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       head->tracker_next = system->waiter_empty_list;
+       system->waiter_empty_list = tail;
+
+       waiter = system->waiter_empty_list;
+       while (NULL != waiter) {
+               next = waiter->tracker_next;
+               _mali_osk_free(waiter);
+               waiter = next;
+       }
+       system->waiter_empty_list = NULL;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       struct mali_timeline_system *system;
+       struct mali_timeline *timeline;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       system = tracker->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+       if (NULL != tracker->waiter_head) {
+               mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+               tracker->waiter_head = NULL;
+               tracker->waiter_tail = NULL;
+       }
+
+       switch (tracker->type) {
+       case MALI_TIMELINE_TRACKER_GP:
+               schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+               _mali_osk_atomic_dec(&gp_tracker_count);
+               break;
+       case MALI_TIMELINE_TRACKER_PP:
+               if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+                       _mali_osk_atomic_dec(&virt_pp_tracker_count);
+               } else {
+                       _mali_osk_atomic_dec(&phy_pp_tracker_count);
+               }
+               schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+               break;
+       case MALI_TIMELINE_TRACKER_SOFT:
+               timeline = tracker->timeline;
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               schedule_mask |= mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+               /* Start a soft timer to make sure the soft job is released within a limited time */
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+               mali_timeline_update_delayed_work(timeline);
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+               break;
+       case MALI_TIMELINE_TRACKER_WAIT:
+               mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+               break;
+       case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+               mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+               MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+               break;
+       default:
+               MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+               break;
+       }
+
+       return schedule_mask;
+}
+
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       tracker->trigger_ref_count++;
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+       u32 tid = _mali_osk_get_tid();
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       tracker->trigger_ref_count--;
+
+       tracker->activation_error |= activation_error;
+
+       if (0 == tracker->trigger_ref_count) {
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+               tracker = NULL;
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return schedule_mask;
+}
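+
+/*
+ * Note on the trigger reference count (a summary of the code above): a
+ * tracker starts with trigger_ref_count == 1 when initialized, every waiter
+ * created for it adds one reference, and each released waiter drops one.
+ * The put that brings the count to zero activates the tracker.
+ */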
+
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               fence->points[i] = uk_fence->points[i];
+       }
+
+       fence->sync_fd = uk_fence->sync_fd;
+}
+
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+       u32 i;
+       struct mali_timeline_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+       system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+       if (NULL == system) {
+               return NULL;
+       }
+
+       system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+       if (NULL == system->spinlock) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+               if (NULL == system->timelines[i]) {
+                       mali_timeline_system_destroy(system);
+                       return NULL;
+               }
+       }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
+       if (NULL == system->signaled_sync_tl) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       system->waiter_empty_list = NULL;
+       system->session = session;
+       system->timer_enabled = MALI_TRUE;
+
+       system->wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == system->wait_queue) {
+               mali_timeline_system_destroy(system);
+               return NULL;
+       }
+
+       return system;
+}
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/**
+ * Check if there are any trackers left on timeline.
+ *
+ * Used as a wait queue conditional.
+ *
+ * @param data Timeline.
+ * @return MALI_TRUE if there are no trackers on timeline, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+       struct mali_timeline *timeline = (struct mali_timeline *) data;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       return mali_timeline_is_empty(timeline);
+}
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+       u32 i;
+       u32 tid = _mali_osk_get_tid();
+       struct mali_timeline_tracker *tracker, *tracker_next;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Cancel sync fence waiters. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               tracker_next = timeline->tracker_tail;
+               while (NULL != tracker_next) {
+                       tracker = tracker_next;
+                       tracker_next = tracker->timeline_next;
+
+                       if (NULL == tracker->sync_fence) continue;
+
+                       MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+                       /* Cancel sync fence waiter. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+                       if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+#else
+                       if (0 == mali_internal_sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+#endif
+                               /* Callback was not called, move tracker to local list. */
+                               _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+                       }
+               }
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+       _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+               mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+       }
+
+       /* Sleep until all sync fence callbacks are done and all timelines are empty. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+       }
+}
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+static void mali_timeline_cancel_dma_fence_waiters(struct mali_timeline_system *system)
+{
+       u32 i, j;
+       u32 tid = _mali_osk_get_tid();
+       struct mali_pp_job *pp_job = NULL;
+       struct mali_pp_job *next_pp_job = NULL;
+       struct mali_timeline *timeline = NULL;
+       struct mali_timeline_tracker *tracker, *tracker_next;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_job_list);
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Cancel dma fence waiters. */
+       timeline = system->timelines[MALI_TIMELINE_PP];
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       tracker_next = timeline->tracker_tail;
+       while (NULL != tracker_next) {
+               mali_bool fence_is_signaled = MALI_TRUE;
+               tracker = tracker_next;
+               tracker_next = tracker->timeline_next;
+
+               if (NULL == tracker->waiter_dma_fence) continue;
+               pp_job = (struct mali_pp_job *)tracker->job;
+               MALI_DEBUG_ASSERT_POINTER(pp_job);
+               MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling dma fence waiter for tracker 0x%08X.\n", tracker));
+
+               for (j = 0; j < pp_job->dma_fence_context.num_dma_fence_waiter; j++) {
+                       if (pp_job->dma_fence_context.mali_dma_fence_waiters[j]) {
+                               /* Cancel a previously callback from the fence.
+                               * This function returns true if the callback is successfully removed,
+                               * or false if the fence has already been signaled.
+                               */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+                               bool ret = dma_fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+                                                                    &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+
+#else
+                               bool ret = fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+                                                                &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+#endif
+                               if (ret) {
+                                       fence_is_signaled = MALI_FALSE;
+                               }
+                       }
+               }
+
+               /* Callbacks were not called, move pp job to local list. */
+               if (MALI_FALSE == fence_is_signaled)
+                       _mali_osk_list_add(&pp_job->list, &pp_job_list);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       /* Manually call dma fence callback in order to release waiter and trigger activation of tracker. */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, next_pp_job, &pp_job_list, struct mali_pp_job, list) {
+               mali_timeline_dma_fence_callback((void *)pp_job);
+       }
+
+       /* Sleep until all dma fence callbacks are done and all timelines are empty. */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+               _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+       }
+}
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) */
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+       MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       mali_timeline_cancel_dma_fence_waiters(system);
+#endif
+
+       /* Should not be any waiters or trackers left at this point. */
+       MALI_DEBUG_CODE({
+               u32 i;
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+               {
+                       struct mali_timeline *timeline = system->timelines[i];
+                       MALI_DEBUG_ASSERT_POINTER(timeline);
+                       MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+                       MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+                       MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+                       MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+                       MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+               }
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+       });
+}
+
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+       u32 i;
+       struct mali_timeline_waiter *waiter, *next;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       u32 tid = _mali_osk_get_tid();
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+       if (NULL != system) {
+
+               /* There should be no waiters left on this queue. */
+               if (NULL != system->wait_queue) {
+                       _mali_osk_wait_queue_term(system->wait_queue);
+                       system->wait_queue = NULL;
+               }
+
+               /* Free all waiters in empty list */
+               waiter = system->waiter_empty_list;
+               while (NULL != waiter) {
+                       next = waiter->tracker_next;
+                       _mali_osk_free(waiter);
+                       waiter = next;
+               }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+               if (NULL != system->signaled_sync_tl) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+                       sync_timeline_destroy(system->signaled_sync_tl);
+#else
+                       mali_internal_sync_timeline_destroy(system->signaled_sync_tl);
+#endif
+               }
+
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+                       if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) {
+                               mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid);
+                               system->timelines[i]->destroyed = MALI_TRUE;
+                               mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid);
+                       }
+               }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+                       if (NULL != system->timelines[i]) {
+                               mali_timeline_destroy(system->timelines[i]);
+                       }
+               }
+
+               if (NULL != system->spinlock) {
+                       mali_spinlock_reentrant_term(system->spinlock);
+               }
+
+               _mali_osk_free(system);
+       }
+}
+
+/**
+ * Find how many waiters are needed for a given fence.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+       u32 i, num_waiters = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               if (MALI_TIMELINE_NO_POINT != fence->points[i]) {
+                       ++num_waiters;
+               }
+       }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (-1 != fence->sync_fd) ++num_waiters;
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       return num_waiters;
+}
+
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+       struct mali_timeline_waiter *waiter;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       waiter = system->waiter_empty_list;
+       if (NULL != waiter) {
+               /* Remove waiter from empty list and zero it */
+               system->waiter_empty_list = waiter->tracker_next;
+               _mali_osk_memset(waiter, 0, sizeof(*waiter));
+       }
+
+       /* Return NULL if list was empty. */
+       return waiter;
+}
+
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+               struct mali_timeline_waiter **tail,
+               struct mali_timeline_waiter **head,
+               int max_num_waiters)
+{
+       u32 i, tid = _mali_osk_get_tid();
+       mali_bool do_alloc;
+       struct mali_timeline_waiter *waiter;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(tail);
+       MALI_DEBUG_ASSERT_POINTER(head);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       *head = *tail = NULL;
+       do_alloc = MALI_FALSE;
+       i = 0;
+       while (i < max_num_waiters) {
+               if (MALI_FALSE == do_alloc) {
+                       waiter = mali_timeline_system_get_zeroed_waiter(system);
+                       if (NULL == waiter) {
+                               do_alloc = MALI_TRUE;
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                               continue;
+                       }
+               } else {
+                       waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+                       if (NULL == waiter) break;
+               }
+               ++i;
+               if (NULL == *tail) {
+                       *tail = waiter;
+                       *head = waiter;
+               } else {
+                       (*head)->tracker_next = waiter;
+                       *head = waiter;
+               }
+       }
+       if (MALI_TRUE == do_alloc) {
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+       }
+}
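+
+/*
+ * Note on the allocation pattern above: waiters are first recycled from the
+ * system's empty list while the spinlock is held.  If that list runs dry,
+ * the lock is dropped so that _mali_osk_calloc() is free to sleep, and it is
+ * re-acquired before returning so the caller still holds the lock.
+ */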
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are released.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail List of pre-allocated waiters.
+ * @param waiter_head List of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+               struct mali_timeline_tracker *tracker,
+               struct mali_timeline_waiter *waiter_tail,
+               struct mali_timeline_waiter *waiter_head)
+{
+       int i;
+       u32 tid = _mali_osk_get_tid();
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_fence *sync_fence = NULL;
+#else
+       struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+       MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+       /* Create a waiter object for every timeline the fence is put on, and insert it
+        * into that timeline's sorted list of waiters */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               mali_timeline_point point;
+               struct mali_timeline *timeline;
+               struct mali_timeline_waiter *waiter;
+
+               /* Get point on current timeline from tracker's fence. */
+               point = tracker->fence.points[i];
+
+               if (likely(MALI_TIMELINE_NO_POINT == point)) {
+                       /* Fence contains no point on this timeline so we don't need a waiter. */
+                       continue;
+               }
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+                       MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+                                         point, timeline->point_oldest, timeline->point_next));
+                       continue;
+               }
+
+               if (likely(mali_timeline_is_point_released(timeline, point))) {
+                       /* Tracker representing the point has been released so we don't need a
+                        * waiter. */
+                       continue;
+               }
+
+               /* The point is on timeline. */
+               MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+               /* Get a new zeroed waiter object. */
+               if (likely(NULL != waiter_tail)) {
+                       waiter = waiter_tail;
+                       waiter_tail = waiter_tail->tracker_next;
+               } else {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+                       continue;
+               }
+
+               /* Increase the trigger ref count of the tracker. */
+               tracker->trigger_ref_count++;
+
+               waiter->point   = point;
+               waiter->tracker = tracker;
+
+               /* Insert waiter on tracker's singly-linked waiter list. */
+               if (NULL == tracker->waiter_head) {
+                       /* list is empty */
+                       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+                       tracker->waiter_tail = waiter;
+               } else {
+                       tracker->waiter_head->tracker_next = waiter;
+               }
+               tracker->waiter_head = waiter;
+
+               /* Add waiter to timeline. */
+               mali_timeline_insert_waiter(timeline, waiter);
+       }
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (-1 != tracker->fence.sync_fd) {
+               int ret;
+               struct mali_timeline_waiter *waiter;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+#else
+               sync_fence = mali_internal_sync_fence_fdget(tracker->fence.sync_fd);
+#endif
+               if (unlikely(NULL == sync_fence)) {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+                       goto exit;
+               }
+
+               /* Check if we have a zeroed waiter object available. */
+               if (unlikely(NULL == waiter_tail)) {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+                       goto exit;
+               }
+
+               /* Start asynchronous wait that will release waiter when the fence is signaled. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+               ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+#else
+               mali_internal_sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+               ret = mali_internal_sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+#endif
+               if (1 == ret) {
+                       /* Fence already signaled, no waiter needed. */
+                       tracker->fence.sync_fd = -1;
+                       goto exit;
+               } else if (0 != ret) {
+                       MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+                       tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+                       goto exit;
+               }
+
+               /* Grab new zeroed waiter object. */
+               waiter = waiter_tail;
+               waiter_tail = waiter_tail->tracker_next;
+
+               /* Increase the trigger ref count of the tracker. */
+               tracker->trigger_ref_count++;
+
+               waiter->point   = MALI_TIMELINE_NO_POINT;
+               waiter->tracker = tracker;
+
+               /* Insert waiter on tracker's singly-linked waiter list. */
+               if (NULL == tracker->waiter_head) {
+                       /* list is empty */
+                       MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+                       tracker->waiter_tail = waiter;
+               } else {
+                       tracker->waiter_head->tracker_next = waiter;
+               }
+               tracker->waiter_head = waiter;
+
+               /* Also store waiter in separate field for easy access by sync callback. */
+               tracker->waiter_sync = waiter;
+
+               /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */
+               tracker->sync_fence = sync_fence;
+
+               sync_fence = NULL;
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       if ((NULL != tracker->timeline) && (MALI_TIMELINE_PP == tracker->timeline->id)) {
+
+               struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+
+               if (0 < job->dma_fence_context.num_dma_fence_waiter) {
+                       struct mali_timeline_waiter *waiter;
+                       /* Check if we have a zeroed waiter object available. */
+                       if (unlikely(NULL == waiter_tail)) {
+                               MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+                               goto exit;
+                       }
+
+                       /* Grab new zeroed waiter object. */
+                       waiter = waiter_tail;
+                       waiter_tail = waiter_tail->tracker_next;
+
+                       /* Increase the trigger ref count of the tracker. */
+                       tracker->trigger_ref_count++;
+
+                       waiter->point   = MALI_TIMELINE_NO_POINT;
+                       waiter->tracker = tracker;
+
+                       /* Insert waiter on tracker's singly-linked waiter list. */
+                       if (NULL == tracker->waiter_head) {
+                               /* list is empty */
+                               MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+                               tracker->waiter_tail = waiter;
+                       } else {
+                               tracker->waiter_head->tracker_next = waiter;
+                       }
+                       tracker->waiter_head = waiter;
+
+                       /* Also store waiter in separate field for easy access by sync callback. */
+                       tracker->waiter_dma_fence = waiter;
+               }
+       }
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+exit:
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       if (NULL != waiter_tail) {
+               mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+       }
+
+       /* Release the initial trigger ref count. */
+       tracker->trigger_ref_count--;
+
+       /* If there were no waiters added to this tracker we activate immediately. */
+       if (0 == tracker->trigger_ref_count) {
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (NULL != sync_fence) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence_put(sync_fence);
+#else
+               fput(sync_fence->file);
+#endif
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+               struct mali_timeline_tracker *tracker,
+               enum mali_timeline_id timeline_id)
+{
+       int num_waiters = 0;
+       struct mali_timeline_waiter *waiter_tail, *waiter_head;
+       u32 tid = _mali_osk_get_tid();
+
+       mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+       MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+       tracker->system = system;
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       if (MALI_TIMELINE_PP == timeline_id) {
+               struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+               if (0 < job->dma_fence_context.num_dma_fence_waiter)
+                       num_waiters++;
+       }
+#endif
+
+       /* Allocate waiters. */
+       mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       /* Add tracker to timeline.  This will allocate a point for the tracker on the timeline. If
+        * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+        * point will be MALI_TIMELINE_NO_POINT.
+        *
+        * NOTE: the tracker can fail to be added if the timeline is full.  If this happens, the
+        * point will be MALI_TIMELINE_NO_POINT. */
+       MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+       if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+               struct mali_timeline *timeline = system->timelines[timeline_id];
+               mali_timeline_insert_tracker(timeline, tracker);
+               MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+       }
+
+       point = tracker->point;
+
+       /* Create waiters for tracker based on supplied fence.  Each waiter will increase the
+        * trigger ref count. */
+       mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+       tracker = NULL;
+
+       /* At this point the tracker object might have been freed so we should no longer
+        * access it. */
+
+       /* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+        * returned. */
+       return point;
+}
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+               struct mali_timeline_waiter *waiter)
+{
+       struct mali_timeline_tracker *tracker;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+       tracker = waiter->tracker;
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       /* At this point the waiter has been removed from the timeline's waiter list, but it is
+        * still on the tracker's waiter list.  All of the tracker's waiters will be released when
+        * the tracker is activated. */
+
+       waiter->point   = MALI_TIMELINE_NO_POINT;
+       waiter->tracker = NULL;
+
+       tracker->trigger_ref_count--;
+       if (0 == tracker->trigger_ref_count) {
+               /* This was the last waiter; activate tracker */
+               schedule_mask |= mali_timeline_tracker_activate(tracker);
+               tracker = NULL;
+       }
+
+       return schedule_mask;
+}
+
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+               enum mali_timeline_id timeline_id)
+{
+       mali_timeline_point point;
+       struct mali_timeline *timeline;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       if (MALI_TIMELINE_MAX <= timeline_id) {
+               return MALI_TIMELINE_NO_POINT;
+       }
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       timeline = system->timelines[timeline_id];
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       point = MALI_TIMELINE_NO_POINT;
+       if (timeline->point_oldest != timeline->point_next) {
+               point = timeline->point_next - 1;
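+               /* point_next - 1 can wrap to MALI_TIMELINE_NO_POINT (0); step past it. */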
+               if (MALI_TIMELINE_NO_POINT == point) point--;
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       return point;
+}
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+static void mali_timeline_do_sync_fence_callback(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_timeline_tracker *tracker;
+       struct mali_timeline_tracker *tmp_tracker;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_IGNORE(arg);
+
+       /*
+        * Quickly "unhook" the trackers that are pending processing, so we can
+        * release the lock before we start handling them
+        * (without any locks held)
+        */
+       _mali_osk_spinlock_irq_lock(sync_fence_callback_list_lock);
+       _mali_osk_list_move_list(&sync_fence_callback_queue, &list);
+       _mali_osk_spinlock_irq_unlock(sync_fence_callback_list_lock);
+
+       _MALI_OSK_LIST_FOREACHENTRY(tracker, tmp_tracker, &list,
+                                   struct mali_timeline_tracker, sync_fence_signal_list) {
+               mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+               mali_bool is_aborting = MALI_FALSE;
+               int fence_status = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               struct sync_fence *sync_fence = NULL;
+#else
+               struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+               struct mali_timeline_system  *system = NULL;
+               struct mali_timeline_waiter  *waiter = NULL;
+
+               _mali_osk_list_delinit(&tracker->sync_fence_signal_list);
+
+               sync_fence = tracker->sync_fence;
+               MALI_DEBUG_ASSERT_POINTER(sync_fence);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+               fence_status = sync_fence->status;
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+               fence_status = atomic_read(&sync_fence->status);
+#else
+               fence_status = sync_fence->fence->ops->signaled(sync_fence->fence);
+#endif
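+               /* A negative status (possible on pre-4.9 kernels) means the
+                * fence signaled with an error; this is checked below. */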
+
+               system = tracker->system;
+               MALI_DEBUG_ASSERT_POINTER(system);
+               MALI_DEBUG_ASSERT_POINTER(system->session);
+
+               mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+               is_aborting = system->session->is_aborting;
+               if (!is_aborting && (0 > fence_status)) {
+                       MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+                       tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+               }
+
+               waiter = tracker->waiter_sync;
+               MALI_DEBUG_ASSERT_POINTER(waiter);
+
+               tracker->sync_fence = NULL;
+               tracker->fence.sync_fd = -1;
+
+               schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+               /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+               if (is_aborting) {
+                       _mali_osk_wait_queue_wake_up(system->wait_queue);
+               }
+
+               mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+               /*
+                * Kernels older than 3.5 do not support fput() in interrupt
+                * context. For those kernels, allocate a list entry, put the
+                * fence object on it, and defer the sync_fence_put() call to a
+                * workqueue.
+                */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               {
+                       struct mali_deferred_fence_put_entry *obj;
+
+                       obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+                       if (obj) {
+                               unsigned long flags;
+                               mali_bool schedule = MALI_FALSE;
+
+                               obj->fence = sync_fence;
+
+                               spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+                               if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+                                       schedule = MALI_TRUE;
+                               hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+                               spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+                               if (schedule)
+                                       schedule_delayed_work(&delayed_sync_fence_put, 0);
+                       }
+               }
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence_put(sync_fence);
+#else
+               fput(sync_fence->file);
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) */
+
+               if (!is_aborting) {
+                       mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+               }
+       }
+}
+#endif
+
+_mali_osk_errcode_t mali_timeline_initialize(void)
+{
+       _mali_osk_atomic_init(&gp_tracker_count, 0);
+       _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+       _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       sync_fence_callback_list_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+       if (NULL == sync_fence_callback_list_lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       sync_fence_callback_work_t = _mali_osk_wq_create_work(
+                                            mali_timeline_do_sync_fence_callback, NULL);
+
+       if (NULL == sync_fence_callback_work_t) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif
+       return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_timeline_terminate(void)
+{
+       _mali_osk_atomic_term(&gp_tracker_count);
+       _mali_osk_atomic_term(&phy_pp_tracker_count);
+       _mali_osk_atomic_term(&virt_pp_tracker_count);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (NULL != sync_fence_callback_list_lock) {
+               _mali_osk_spinlock_irq_term(sync_fence_callback_list_lock);
+               sync_fence_callback_list_lock = NULL;
+       }
+
+       if (NULL != sync_fence_callback_work_t) {
+               _mali_osk_wq_delete_work(sync_fence_callback_work_t);
+               sync_fence_callback_work_t = NULL;
+       }
+#endif
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+       struct mali_timeline *timeline;
+       struct mali_timeline_system *system;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+       timeline = tracker->timeline;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline->system);
+       system = timeline->system;
+
+       if (MALI_TIMELINE_MAX > id) {
+               if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+                       return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+               } else {
+                       return MALI_FALSE;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+               return MALI_FALSE;
+       }
+}
+
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+       switch (id) {
+       case MALI_TIMELINE_GP:
+               return "GP";
+       case MALI_TIMELINE_PP:
+               return "PP";
+       case MALI_TIMELINE_SOFT:
+               return "SOFT";
+       default:
+               return "NONE";
+       }
+}
+
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+       switch (type) {
+       case MALI_TIMELINE_TRACKER_GP:
+               return "GP";
+       case MALI_TIMELINE_TRACKER_PP:
+               return "PP";
+       case MALI_TIMELINE_TRACKER_SOFT:
+               return "SOFT";
+       case MALI_TIMELINE_TRACKER_WAIT:
+               return "WAIT";
+       case MALI_TIMELINE_TRACKER_SYNC:
+               return "SYNC";
+       default:
+               return "INVALID";
+       }
+}
+
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+       struct mali_timeline *timeline = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       timeline = tracker->timeline;
+
+       if (0 != tracker->trigger_ref_count) {
+               return MALI_TIMELINE_TS_WAITING;
+       }
+
+       if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+               return MALI_TIMELINE_TS_ACTIVE;
+       }
+
+       if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+               return MALI_TIMELINE_TS_INIT;
+       }
+
+       return MALI_TIMELINE_TS_FINISH;
+}
+
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
+{
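+       /* One character per mali_timeline_tracker_state: Init, Waiting, Active, Finish. */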
+       const char *tracker_state = "IWAF";
+       char state_char = 'I';
+       char tracker_type[32] = {0};
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+       _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (0 != tracker->trigger_ref_count) {
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                                            tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                            tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job));
+               else
+                       MALI_DEBUG_PRINT(2, ("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                           tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+       } else {
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                                           tracker_type, tracker->point, state_char,
+                                           tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job));
+               else
+                       MALI_DEBUG_PRINT(2, ("TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                                            tracker_type, tracker->point, state_char,
+                                            tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+
+       }
+#else
+       if (0 != tracker->trigger_ref_count) {
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)]  job:(0x%08X)\n",
+                                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                           (unsigned int)(uintptr_t)(tracker->job));
+               else
+                       MALI_DEBUG_PRINT(2, ("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)]  job:(0x%08X)\n",
+                                            tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                            is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                            (unsigned int)(uintptr_t)(tracker->job)));
+       } else {
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  job:(0x%08X)\n",
+                                            tracker_type, tracker->point, state_char,
+                                            (unsigned int)(uintptr_t)(tracker->job));
+               else
+                       MALI_DEBUG_PRINT(2, ("TL:  %s %u %c  job:(0x%08X)\n",
+                                           tracker_type, tracker->point, state_char,
+                                           (unsigned int)(uintptr_t)(tracker->job)));
+
+       }
+#endif
+}
+
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
+{
+       struct mali_timeline_tracker *tracker = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       tracker = timeline->tracker_tail;
+       while (NULL != tracker) {
+               mali_timeline_debug_print_tracker(tracker, print_ctx);
+               tracker = tracker->timeline_next;
+       }
+}
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker)
+{
+       const char *tracker_state = "IWAF";
+       char state_char = 'I';
+       char tracker_type[32] = {0};
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+       _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (0 != tracker->trigger_ref_count) {
+               MALI_PRINT(("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                           tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+       } else {
+               MALI_PRINT(("TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char,
+                           tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)));
+       }
+#else
+       if (0 != tracker->trigger_ref_count) {
+               MALI_PRINT(("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)]  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                           (unsigned int)(uintptr_t)(tracker->job)));
+       } else {
+               MALI_PRINT(("TL:  %s %u %c  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char,
+                           (unsigned int)(uintptr_t)(tracker->job)));
+       }
+#endif
+}
+
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline)
+{
+       struct mali_timeline_tracker *tracker = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       tracker = timeline->tracker_tail;
+       while (NULL != tracker) {
+               mali_timeline_debug_direct_print_tracker(tracker);
+               tracker = tracker->timeline_next;
+       }
+}
+
+#endif
+
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
+{
+       int i;
+       int num_printed = 0;
+       u32 tid = _mali_osk_get_tid();
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       /* Print all timelines */
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline = system->timelines[i];
+
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (NULL == timeline->tracker_head) continue;
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+                                           timeline_id_to_string((enum mali_timeline_id)i));
+               else
+                       MALI_DEBUG_PRINT(2, ("TL: Timeline %s: oldest (%u) next(%u)\n",
+                                            timeline_id_to_string((enum mali_timeline_id)i), timeline->point_oldest, timeline->point_next));
+
+               mali_timeline_debug_print_timeline(timeline, print_ctx);
+               num_printed++;
+       }
+
+       if (0 == num_printed) {
+               if (print_ctx)
+                       _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
+               else
+                       MALI_DEBUG_PRINT(2, ("TL: All timelines empty\n"));
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+void mali_timeline_dma_fence_callback(void *pp_job_ptr)
+{
+       struct mali_timeline_system  *system;
+       struct mali_timeline_waiter  *waiter;
+       struct mali_timeline_tracker *tracker;
+       struct mali_pp_job *pp_job = (struct mali_pp_job *)pp_job_ptr;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+       u32 tid = _mali_osk_get_tid();
+       mali_bool is_aborting = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_POINTER(pp_job);
+
+       tracker = &pp_job->tracker;
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       system = tracker->system;
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(system->session);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       waiter = tracker->waiter_dma_fence;
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+
+       schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+       is_aborting = system->session->is_aborting;
+
+       /* If aborting, wake up sleepers that are waiting for dma fence callbacks to complete. */
+       if (is_aborting) {
+               _mali_osk_wait_queue_wake_up(system->wait_queue);
+       }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+       if (!is_aborting) {
+               mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+       }
+}
+#endif
diff --git a/utgard/r8p0/common/mali_timeline.h b/utgard/r8p0/common/mali_timeline.h
new file mode 100755 (executable)
index 0000000..3aeb75c
--- /dev/null
@@ -0,0 +1,563 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+#include <linux/version.h>
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation.  Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+       MALI_TIMELINE_GP   = MALI_UK_TIMELINE_GP,   /**< GP job timeline. */
+       MALI_TIMELINE_PP   = MALI_UK_TIMELINE_PP,   /**< PP job timeline. */
+       MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+       MALI_TIMELINE_MAX  = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+       MALI_TIMELINE_TRACKER_GP   = 0, /**< Tracker used by GP jobs. */
+       MALI_TIMELINE_TRACKER_PP   = 1, /**< Tracker used by PP jobs. */
+       MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+       MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+       MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+       MALI_TIMELINE_TRACKER_MAX  = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE      0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT  (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent the absence of a point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline.  A timeline is considered full if the difference
+ * between the oldest and newest points is equal to or larger than this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+       mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+       s32                 sync_fd;                   /**< A file descriptor representing a sync fence, or -1. */
+};
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+       struct mali_spinlock_reentrant *spinlock;   /**< Spin lock protecting the timeline system */
+       struct mali_timeline           *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+       /* Single-linked list of unused waiter objects.  Uses the tracker_next field in tracker. */
+       struct mali_timeline_waiter    *waiter_empty_list;
+
+       struct mali_session_data       *session;    /**< Session that owns this system. */
+
+       mali_bool                       timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+       _mali_osk_wait_queue_t         *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_timeline           *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#else
+       struct mali_internal_sync_timeline           *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+};
+
+/**
+ * Timeline.  Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+       mali_timeline_point           point_next;   /**< The next available point. */
+       mali_timeline_point           point_oldest; /**< The oldest point not released. */
+
+       /* Double-linked list of trackers.  Sorted in ascending order by tracker->point with
+        * tail pointing to the tracker with the oldest point. */
+       struct mali_timeline_tracker *tracker_head;
+       struct mali_timeline_tracker *tracker_tail;
+
+       /* Double-linked list of waiters.  Sorted in ascending order by waiter->point
+        * with tail pointing to the waiter with the oldest point. */
+       struct mali_timeline_waiter  *waiter_head;
+       struct mali_timeline_waiter  *waiter_tail;
+
+       struct mali_timeline_system  *system;       /**< Timeline system this timeline belongs to. */
+       enum mali_timeline_id         id;           /**< Timeline type. */
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_timeline         *sync_tl;      /**< Sync timeline that corresponds to this timeline. */
+#else
+       struct mali_internal_sync_timeline *sync_tl;
+#endif
+       mali_bool destroyed;
+       struct mali_spinlock_reentrant *spinlock;       /**< Spin lock protecting the timeline system */
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       /* The following fields are used to time out soft job trackers. */
+       _mali_osk_wq_delayed_work_t  *delayed_work;
+       mali_bool                     timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+       mali_timeline_point           point;         /**< Point on timeline we are waiting for to be released. */
+       struct mali_timeline_tracker *tracker;       /**< Tracker that is waiting. */
+
+       struct mali_timeline_waiter  *timeline_next; /**< Next waiter on timeline's waiter list. */
+       struct mali_timeline_waiter  *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+       struct mali_timeline_waiter  *tracker_next;  /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+       MALI_DEBUG_CODE(u32            magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+       mali_timeline_point            point; /**< Point on timeline for this tracker */
+
+       struct mali_timeline_tracker  *timeline_next; /**< Next tracker on timeline's tracker list */
+       struct mali_timeline_tracker  *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+       u32                            trigger_ref_count; /**< When zero tracker will be activated */
+       mali_timeline_activation_error activation_error;  /**< Activation error. */
+       struct mali_timeline_fence     fence;             /**< Fence used to create this tracker */
+
+       /* Single-linked list of waiters.  Sorted in order of insertions with
+        * tail pointing to first waiter. */
+       struct mali_timeline_waiter   *waiter_head;
+       struct mali_timeline_waiter   *waiter_tail;
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       /* These are only used if the tracker is waiting on a sync fence. */
+       struct mali_timeline_waiter   *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_fence_waiter       sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+       struct sync_fence             *sync_fence;   /**< The sync fence this tracker is waiting on. */
+#else
+       struct mali_internal_sync_fence_waiter       sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+       struct mali_internal_sync_fence             *sync_fence;   /**< The sync fence this tracker is waiting on. */
+#endif
+       _mali_osk_list_t               sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+       _mali_osk_list_t               sync_fence_signal_list; /**< List node used to signal the sync fence callback function. */
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+       struct mali_timeline_waiter   *waiter_dma_fence; /**< A direct pointer to timeline waiter representing dma fence. */
+#endif
+
+       struct mali_timeline_system   *system;       /**< Timeline system. */
+       struct mali_timeline          *timeline;     /**< Timeline, or NULL if not on a timeline. */
+       enum mali_timeline_tracker_type type;        /**< Type of tracker. */
+       void                          *job;          /**< Owner of tracker. */
+
+       /* The following fields are used to time out soft job trackers. */
+       unsigned long                 os_tick_create;
+       unsigned long                 os_tick_activate;
+       mali_bool                     timer_active;
+};
+
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is.  Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero.  Remember that all of these calculation are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in.  The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ *                               [ point is in forbidden zone ]
+ *                                          64k wide
+ *                                MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ *    [ point is on timeline     )                            ( point is released ]
+ *
+ *    0--------------------------##############################--------------------2^32 - 1
+ *    ^                          ^
+ *    \                          |
+ *     oldest point on timeline  |
+ *                               \
+ *                                next point on timeline
+ */
+
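+/*
+ * Worked example (illustrative values only): assume point_oldest == 10 and
+ * point_next == 13.  Points 10, 11 and 12 are on the timeline.  Points from
+ * 13 up to and including (13 + MALI_TIMELINE_MAX_POINT_SPAN) are in the
+ * forbidden zone and thus invalid.  Any other value, e.g. 9 or
+ * (13 + MALI_TIMELINE_MAX_POINT_SPAN + 1), wraps around in the unsigned
+ * arithmetic and is treated as released.
+ */
+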
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN.  Wrapping is supported and
+ * the result will be correct if the points are less than UINT_MAX/2 apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+       return 0 > ((s32)b) - ((s32)a);
+}
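+
+/*
+ * Illustrative behaviour (sketch, values assumed):
+ *
+ *   mali_timeline_point_after(5, 3)          returns MALI_TRUE
+ *   mali_timeline_point_after(3, 5)          returns MALI_FALSE
+ *   mali_timeline_point_after(3, 3)          returns MALI_FALSE
+ *   mali_timeline_point_after(1, 0xFFFFFFFF) returns MALI_TRUE, since the
+ *                                            counter has wrapped
+ */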
+
+/**
+ * Check if a point is on timeline.  A point is on a timeline if it is greater than, or equal to,
+ * the oldest point, and less than the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
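+       /* Unsigned subtraction handles wrap-around: the point is on the
+        * timeline iff it lies in [point_oldest, point_next). */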
+       return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Check if a point has been released.  A point is released if it is older than the oldest point on
+ * the timeline, newer than the next point, and also not in the forbidden zone.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       mali_timeline_point point_normalized;
+       mali_timeline_point next_normalized;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       point_normalized = point - timeline->point_oldest;
+       next_normalized = timeline->point_next - timeline->point_oldest;
+
+       return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
+
+/**
+ * Check if a point is valid.  A point is valid if it is on the timeline or has been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if timeline is empty (has no points on it).  A timeline is empty if next == oldest.
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       return timeline->point_next == timeline->point_oldest;
+}
+
+/**
+ * Check if timeline is full.  A valid timeline cannot span more than 64k points (@ref
+ * MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated.  The tracker can be
+ * activated before this function returns.  Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ *                    MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+               struct mali_timeline_tracker *tracker,
+               enum mali_timeline_id timeline_id);
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+               enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+                               mali_timeline_tracker_type type,
+                               struct mali_timeline_fence *fence,
+                               void *job);
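+
+/*
+ * Illustrative call sequence (sketch only; the "job" and "fence" variables
+ * are assumed and error handling is omitted):
+ *
+ *   mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT,
+ *                              &fence, job);
+ *   point = mali_timeline_system_add_tracker(system, &job->tracker,
+ *                                            MALI_TIMELINE_SOFT);
+ *
+ * Once mali_timeline_system_add_tracker() returns, the tracker may already
+ * have been activated and even freed, so the caller must not touch it again.
+ */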
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
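+
+/*
+ * Illustrative get/put pairing (sketch only): a caller that needs to delay
+ * activation takes an extra trigger reference and releases it later,
+ * scheduling from the returned mask:
+ *
+ *   mali_timeline_system_tracker_get(system, tracker);
+ *   ...
+ *   mask = mali_timeline_system_tracker_put(system, tracker,
+ *                                           MALI_TIMELINE_ACTIVATION_ERROR_NONE);
+ *   mali_executor_schedule_from_mask(mask, MALI_FALSE);
+ */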
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracked is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary.  If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+       struct mali_timeline_tracker *tracker)
+{
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+               tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+_mali_osk_errcode_t mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
+
+#if defined(DEBUG)
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state.  Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+       MALI_TIMELINE_TS_INIT    = 0,
+       MALI_TIMELINE_TS_WAITING = 1,
+       MALI_TIMELINE_TS_ACTIVE  = 2,
+       MALI_TIMELINE_TS_FINISH  = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline);
+#endif
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+/**
+ * The timeline dma fence callback when dma fence signal.
+ *
+ * @param pp_job_ptr The pointer to pp job that link to the signaled dma fence.
+ */
+void mali_timeline_dma_fence_callback(void *pp_job_ptr);
+#endif
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/utgard/r8p0/common/mali_timeline_fence_wait.c b/utgard/r8p0/common/mali_timeline_fence_wait.c
new file mode 100755 (executable)
index 0000000..8e709a2
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/file.h>
+#include "mali_timeline_fence_wait.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+       return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+       MALI_DEBUG_ASSERT_POINTER(wait);
+       _mali_osk_atomic_term(&wait->refcount);
+       _mali_osk_free(wait);
+}
+
+/**
+ * Check if fence wait tracker has been activated.  Used as a wait queue condition.
+ *
+ * @param data Fence waiter.
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+       struct mali_timeline_fence_wait_tracker *wait;
+
+       wait = (struct mali_timeline_fence_wait_tracker *) data;
+       MALI_DEBUG_ASSERT_POINTER(wait);
+
+       return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+       int i;
+       u32 tid = _mali_osk_get_tid();
+       mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_fence *sync_fence = NULL;
+#else
+       struct mali_internal_sync_fence *sync_fence = NULL;
+#endif
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline;
+               mali_timeline_point   point;
+
+               point = fence->points[i];
+
+               if (likely(MALI_TIMELINE_NO_POINT == point)) {
+                       /* Fence contains no point on this timeline. */
+                       continue;
+               }
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+                       MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+               }
+
+               if (!mali_timeline_is_point_released(timeline, point)) {
+                       ret = MALI_FALSE;
+                       goto exit;
+               }
+       }
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (-1 != fence->sync_fd) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence = sync_fence_fdget(fence->sync_fd);
+#else
+               sync_fence = mali_internal_sync_fence_fdget(fence->sync_fd);
+#endif
+               if (likely(NULL != sync_fence)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+                       if (0 == sync_fence->status) {
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+                       if (0 < atomic_read(&sync_fence->status)) {
+#else
+                       if (0 == sync_fence->fence->ops->signaled(sync_fence->fence)) {
+#endif
+                               ret = MALI_FALSE;
+                       } else {
+                               ret = MALI_TRUE;
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+               }
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+exit:
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       if (NULL != sync_fence) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence_put(sync_fence);
+#else
+               fput(sync_fence->file);
+#endif
+       }
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       return ret;
+}
+
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+       struct mali_timeline_fence_wait_tracker *wait;
+       mali_timeline_point point;
+       mali_bool ret;
+
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+       if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+               return mali_timeline_fence_wait_check_status(system, fence);
+       }
+
+       wait = mali_timeline_fence_wait_tracker_alloc();
+       if (unlikely(NULL == wait)) {
+               MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+               return MALI_FALSE;
+       }
+
+       wait->activated = MALI_FALSE;
+       wait->system = system;
+
+       /* Initialize refcount to two references.  The first reference will be released by this
+        * function after the wait is over.  The second reference will be released when the tracker
+        * is activated. */
+       _mali_osk_atomic_init(&wait->refcount, 2);
+
+       /* Add tracker to timeline system, but not to a timeline. */
+       mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+       point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+       MALI_IGNORE(point);
+
+       /* Wait for the tracker to be activated or time out. */
+       if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+               _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+       } else {
+               _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+       }
+
+       ret = wait->activated;
+
+       if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+               mali_timeline_fence_wait_tracker_free(wait);
+       }
+
+       return ret;
+}
+
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(wait);
+       MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+       wait->activated = MALI_TRUE;
+
+       _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+       /* Nothing can wait on this tracker, so nothing to schedule after release. */
+       schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+       MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+       MALI_IGNORE(schedule_mask);
+
+       if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+               mali_timeline_fence_wait_tracker_free(wait);
+       }
+}
diff --git a/utgard/r8p0/common/mali_timeline_fence_wait.h b/utgard/r8p0/common/mali_timeline_fence_wait.h
new file mode 100755 (executable)
index 0000000..348652b
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency.  We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ */
+struct mali_timeline_fence_wait_tracker {
+       mali_bool activated;                  /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+       _mali_osk_atomic_t refcount;          /**< Reference count. */
+       struct mali_timeline_system *system;  /**< Timeline system. */
+       struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled, or timeout is reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
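+
+/* Illustrative usage of the timeout constants above (the 100 ms value is a
+ * placeholder, not a recommendation):
+ *
+ * @code
+ * if (MALI_FALSE == mali_timeline_fence_wait(system, fence,
+ *                   MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY)) {
+ *     // Not signaled yet; block for up to 100 ms.
+ *     mali_timeline_fence_wait(system, fence, 100);
+ * }
+ * @endcode
+ */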
+
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence waiter tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/utgard/r8p0/common/mali_timeline_sync_fence.c b/utgard/r8p0/common/mali_timeline_sync_fence.c
new file mode 100755 (executable)
index 0000000..4541ca6
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/file.h>
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+
+/**
+ * Creates a sync fence tracker and a sync fence.  Adds sync fence tracker to Timeline system and
+ * returns sync fence.  The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+#else
+static struct mali_internal_sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+#endif
+{
+       struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_fence *sync_fence;
+#else
+       struct mali_internal_sync_fence *sync_fence;
+#endif
+       struct mali_timeline_fence fence;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+       /* Allocate sync fence tracker. */
+       sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+       if (NULL == sync_fence_tracker) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+               return NULL;
+       }
+
+       /* Create sync flag. */
+       MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+       sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+       if (NULL == sync_fence_tracker->flag) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+               _mali_osk_free(sync_fence_tracker);
+               return NULL;
+       }
+
+       /* Create sync fence from sync flag. */
+       sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+       if (NULL == sync_fence) {
+               MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+               mali_sync_flag_put(sync_fence_tracker->flag);
+               _mali_osk_free(sync_fence_tracker);
+               return NULL;
+       }
+
+       /* Setup fence for tracker. */
+       _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+       fence.sync_fd = -1;
+       fence.points[timeline->id] = point;
+
+       /* Finally, add the tracker to Timeline system. */
+       mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+       point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+       MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+
+       return sync_fence;
+}
+
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+       u32 i;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_fence *sync_fence_acc = NULL;
+#else
+       struct mali_internal_sync_fence *sync_fence_acc = NULL;
+#endif
+       MALI_DEBUG_ASSERT_POINTER(system);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+               struct mali_timeline *timeline;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               struct sync_fence *sync_fence;
+#else
+               struct mali_internal_sync_fence *sync_fence;
+#endif
+               if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+               timeline = system->timelines[i];
+               MALI_DEBUG_ASSERT_POINTER(timeline);
+
+               sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+               if (NULL == sync_fence) goto error;
+
+               if (NULL != sync_fence_acc) {
+                       /* Merge sync fences. */
+                       sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+                       if (NULL == sync_fence_acc) goto error;
+               } else {
+                       /* This was the first sync fence created. */
+                       sync_fence_acc = sync_fence;
+               }
+       }
+
+       if (-1 != fence->sync_fd) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               struct sync_fence *sync_fence;
+               sync_fence = sync_fence_fdget(fence->sync_fd);
+#else
+               struct mali_internal_sync_fence *sync_fence;
+               sync_fence = mali_internal_sync_fence_fdget(fence->sync_fd);
+#endif
+
+               if (NULL == sync_fence) goto error;
+
+               if (NULL != sync_fence_acc) {
+                       sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+                       if (NULL == sync_fence_acc) goto error;
+               } else {
+                       sync_fence_acc = sync_fence;
+               }
+       }
+
+       if (NULL == sync_fence_acc) {
+               MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+               /* There was nothing to wait on, so return an already signaled fence. */
+
+               sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+               if (NULL == sync_fence_acc) goto error;
+       }
+
+       /* Return file descriptor for the accumulated sync fence. */
+       return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+       if (NULL != sync_fence_acc) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+               sync_fence_put(sync_fence_acc);
+#else
+               fput(sync_fence_acc->file);
+#endif
+       }
+
+       return -1;
+}
+
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+       MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+       /* Signal flag and release reference. */
+       mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+       mali_sync_flag_put(sync_fence_tracker->flag);
+
+       /* Nothing can wait on this tracker, so nothing to schedule after release. */
+       schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+       MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+
+       _mali_osk_free(sync_fence_tracker);
+}
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
diff --git a/utgard/r8p0/common/mali_timeline_sync_fence.h b/utgard/r8p0/common/mali_timeline_sync_fence.h
new file mode 100755 (executable)
index 0000000..a88f4fb
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+
+/**
+ * Sync fence tracker.
+ */
+struct mali_timeline_sync_fence_tracker {
+       struct mali_sync_flag        *flag;    /**< Sync flag used to connect tracker and sync fence. */
+       struct mali_timeline_tracker  tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
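+
+/* Illustrative only: exporting a timeline fence to user space as a sync
+ * file descriptor (error handling is a sketch):
+ *
+ * @code
+ * s32 fd = mali_timeline_sync_fence_create(system, &fence);
+ * if (0 > fd) {
+ *     // creation or merge failed; no file descriptor was allocated
+ * }
+ * @endcode
+ */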
+
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ *
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/utgard/r8p0/common/mali_ukk.h b/utgard/r8p0/common/mali_ukk.h
new file mode 100755 (executable)
index 0000000..79829b9
--- /dev/null
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ *   -  The Device Driver has implemented the _mali_ukk set of functions
+ *   -  The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ *   - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ *     return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call).
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ *     void *ctx;
+ *     u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure, named after the
+ * function and distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ *  interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ *     - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * -  Writing values into the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ *     - For example, U/K interface implementations that take NULL members in
+ * their argument structures from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ *     - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ *     - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ *     - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ *  meaning of the ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers.
+ *     - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ *     - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between the user side and kernel side. In that case, the kernel side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context);
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context);
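+
+/* Illustrative sketch of the session lifetime described above (error
+ * handling elided; on an IOCTL system these are reached through the
+ * user-side _mali_uku wrappers rather than called directly):
+ *
+ * @code
+ * void *context = NULL;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&context)) {
+ *     // ... issue U/K calls, copying context into each argument struct ...
+ *     _mali_ukk_close(&context);
+ * }
+ * @endcode
+ */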
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @brief Make the calling process sleep while the number of pending big jobs
+ * in the kernel is >= MALI_MAX_PENDING_BIG_JOB.
+ *
+ * @param args see _mali_uk_pending_submit_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() are dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned; otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
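+
+/* Illustrative user-side retry flow for the requeue status described above.
+ * _mali_uku_pp_start_job is the hypothetical user-side counterpart (per the
+ * _mali_uku naming convention above), not a function declared in this file:
+ *
+ * @code
+ * do {
+ *     err = _mali_uku_pp_start_job(&args);
+ * } while (_MALI_OSK_ERR_OK == err &&
+ *          _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == args.status);
+ * @endcode
+ */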
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned; otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Get profiling stream fd.
+ *
+ * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args);
+
+/** @brief Profiling control set.
+ *
+ * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_report_total_memory_size(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/utgard/r8p0/common/mali_user_settings_db.c b/utgard/r8p0/common/mali_user_settings_db.c
new file mode 100755 (executable)
index 0000000..a1a613f
--- /dev/null
@@ -0,0 +1,147 @@
+/**
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+       mali_bool done = MALI_FALSE;
+
+       /*
+        * This function gets a bit complicated because we can't hold the session lock while
+        * allocating notification objects.
+        */
+
+       while (!done) {
+               u32 i;
+               u32 num_sessions_alloc;
+               u32 num_sessions_with_lock;
+               u32 used_notification_objects = 0;
+               _mali_osk_notification_t **notobjs;
+
+               /* Preallocate the number of notification objects we need right now (might change after the lock has been taken) */
+               num_sessions_alloc = mali_session_get_count();
+               if (0 == num_sessions_alloc) {
+                       /* No sessions to report to */
+                       return;
+               }
+
+               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+               if (NULL == notobjs) {
+                       MALI_PRINT_ERROR(("Failed to notify user space sessions about setting change (alloc failure)\n"));
+                       return;
+               }
+
+               for (i = 0; i < num_sessions_alloc; i++) {
+                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+                                       sizeof(_mali_uk_settings_changed_s));
+                       if (NULL != notobjs[i]) {
+                               _mali_uk_settings_changed_s *data;
+                               data = notobjs[i]->result_buffer;
+
+                               data->setting = setting;
+                               data->value = value;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+                       }
+               }
+
+               mali_session_lock();
+
+               /* number of sessions will not change while we hold the lock */
+               num_sessions_with_lock = mali_session_get_count();
+
+               if (num_sessions_alloc >= num_sessions_with_lock) {
+                       /* We have allocated enough notification objects for all current sessions */
+                       struct mali_session_data *session, *tmp;
+                       MALI_SESSION_FOREACH(session, tmp, link) {
+                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+                               if (NULL != notobjs[used_notification_objects]) {
+                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
+                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+                               }
+                               used_notification_objects++;
+                       }
+                       done = MALI_TRUE;
+               }
+
+               mali_session_unlock();
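+
+               /* If new sessions appeared while the lock was dropped, done is
+                * still MALI_FALSE and the loop retries with a larger
+                * allocation. */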
+
+               /* Delete any remaining/unused notification objects */
+               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+                       if (NULL != notobjs[used_notification_objects]) {
+                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
+                       }
+               }
+
+               _mali_osk_free(notobjs);
+       }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+       mali_bool notify = MALI_FALSE;
+
+       if (setting >= _MALI_UK_USER_SETTING_MAX) {
+               MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", (u32)setting));
+               return;
+       }
+
+       if (mali_user_settings[setting] != value) {
+               notify = MALI_TRUE;
+       }
+
+       mali_user_settings[setting] = value;
+
+       if (notify) {
+               mali_user_settings_notify(setting, value);
+       }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+       if (setting >= _MALI_UK_USER_SETTING_MAX) {
+               return 0;
+       }
+
+       return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+       _mali_uk_user_setting_t setting;
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       setting = args->setting;
+
+       if (_MALI_UK_USER_SETTING_MAX > setting) {
+               args->value = mali_user_settings[setting];
+               return _MALI_OSK_ERR_OK;
+       } else {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/utgard/r8p0/common/mali_user_settings_db.h b/utgard/r8p0/common/mali_user_settings_db.h
new file mode 100755 (executable)
index 0000000..6828dc7
--- /dev/null
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value differs from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
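+
+/* Illustrative only (\a setting stands for any valid
+ * _mali_uk_user_setting_t value):
+ *
+ * @code
+ * mali_set_user_setting(setting, 1);              // notifies sessions on change
+ * u32 value = mali_get_user_setting(setting);     // reads the stored value
+ * @endcode
+ */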
+
+#ifdef __cplusplus
+}
+#endif
+#endif  /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/utgard/r8p0/include/linux/mali/mali_utgard.h b/utgard/r8p0/include/linux/mali/mali_utgard.h
new file mode 100755 (executable)
index 0000000..e58ed1f
--- /dev/null
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include "mali_pm_metrics.h"
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#endif
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+
+#define MALI_OFFSET_GP                    0x00000
+#define MALI_OFFSET_GP_MMU                0x03000
+
+#define MALI_OFFSET_PP0                   0x08000
+#define MALI_OFFSET_PP0_MMU               0x04000
+#define MALI_OFFSET_PP1                   0x0A000
+#define MALI_OFFSET_PP1_MMU               0x05000
+#define MALI_OFFSET_PP2                   0x0C000
+#define MALI_OFFSET_PP2_MMU               0x06000
+#define MALI_OFFSET_PP3                   0x0E000
+#define MALI_OFFSET_PP3_MMU               0x07000
+
+#define MALI_OFFSET_PP4                   0x28000
+#define MALI_OFFSET_PP4_MMU               0x1C000
+#define MALI_OFFSET_PP5                   0x2A000
+#define MALI_OFFSET_PP5_MMU               0x1D000
+#define MALI_OFFSET_PP6                   0x2C000
+#define MALI_OFFSET_PP6_MMU               0x1E000
+#define MALI_OFFSET_PP7                   0x2E000
+#define MALI_OFFSET_PP7_MMU               0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0          0x01000
+#define MALI_OFFSET_L2_RESOURCE1          0x10000
+#define MALI_OFFSET_L2_RESOURCE2          0x11000
+
+#define MALI400_OFFSET_L2_CACHE0          MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0          MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1          MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2          MALI_OFFSET_L2_RESOURCE2
+#define MALI470_OFFSET_L2_CACHE1          MALI_OFFSET_L2_RESOURCE0
+
+#define MALI_OFFSET_BCAST                 0x13000
+#define MALI_OFFSET_DLBU                  0x14000
+
+#define MALI_OFFSET_PP_BCAST              0x16000
+#define MALI_OFFSET_PP_BCAST_MMU          0x15000
+
+#define MALI_OFFSET_PMU                   0x02000
+#define MALI_OFFSET_DMA                   0x12000
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+/* Mali-470 */
+#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+       { \
+               .name = "Mali_L2", \
+               .flags = IORESOURCE_MEM, \
+               .start = addr, \
+               .end   = addr + 0x200, \
+       },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end   = gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+       { \
+               .name = "Mali_GP", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end   = gp_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_mmu_addr, \
+               .end   = gp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_GP_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_mmu_irq, \
+               .end   = gp_mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+       { \
+               .name = "Mali_PP" #id, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_addr, \
+               .end   = pp_mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_PP" #id "_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_mmu_irq, \
+               .end   = pp_mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+       { \
+               .name = "Mali_MMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = mmu_addr, \
+               .end   = mmu_addr + 0x100, \
+       }, \
+       { \
+               .name = "Mali_MMU_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = mmu_irq, \
+               .end   = mmu_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+       { \
+               .name = "Mali_PMU", \
+               .flags = IORESOURCE_MEM, \
+               .start = pmu_addr, \
+               .end   = pmu_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_DMA(dma_addr) \
+       { \
+               .name = "Mali_DMA", \
+               .flags = IORESOURCE_MEM, \
+               .start = dma_addr, \
+               .end   = dma_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+       { \
+               .name = "Mali_DLBU", \
+               .flags = IORESOURCE_MEM, \
+               .start = dlbu_addr, \
+               .end   = dlbu_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+       { \
+               .name = "Mali_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = bcast_addr, \
+               .end   = bcast_addr + 0x100, \
+       },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+       { \
+               .name = "Mali_PP_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
+       }, \
+       { \
+               .name = "Mali_PP_Broadcast_IRQ", \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
+       },
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+       { \
+               .name = "Mali_PP_MMU_Broadcast", \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_bcast_addr, \
+               .end   = pp_mmu_bcast_addr + 0x100, \
+       },
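+
+/*
+ * Usage sketch: a board file can expand one of the MALI_GPU_RESOURCES_*
+ * convenience macros above into the platform device's resource table,
+ * assuming the usual platform-device includes such as <linux/ioport.h>.
+ * The base address and IRQ numbers below are hypothetical placeholders,
+ * not values from any real SoC.
+ *
+ *   static struct resource mali_gpu_resources[] = {
+ *           MALI_GPU_RESOURCES_MALI450_MP2_PMU(0xD00C0000, 18, 19, 20, 21, 22, 23, 24)
+ *   };
+ */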
+
+       struct mali_gpu_utilization_data {
+               unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+               unsigned int utilization_gp;  /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+               unsigned int utilization_pp;  /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+       };
+
+       struct mali_gpu_clk_item {
+               unsigned int clock; /* unit: MHz */
+               unsigned int vol;
+       };
+
+       struct mali_gpu_clock {
+               struct mali_gpu_clk_item *item;
+               unsigned int num_of_steps;
+       };
+
+       struct mali_gpu_device_data {
+               /* Shared GPU memory */
+               unsigned long shared_mem_size;
+
+               /*
+                * Mali PMU switch delay.
+                * Only needed if the power gates are connected to the PMU in a high fanout
+                * network. This value is the number of Mali clock cycles it takes to
+                * enable the power gates and turn on the power mesh.
+                * This value will have no effect if a daisy chain implementation is used.
+                */
+               u32 pmu_switch_delay;
+
+               /* Mali Dynamic power domain configuration in sequence from 0-11
+                *  GP  PP0 PP1  PP2  PP3  PP4  PP5  PP6  PP7, L2$0 L2$1 L2$2
+                */
+               u16 pmu_domain_config[12];
+
+               /* Dedicated GPU memory range (physical). */
+               unsigned long dedicated_mem_start;
+               unsigned long dedicated_mem_size;
+
+               /* Frame buffer memory to be accessible by Mali GPU (physical) */
+               unsigned long fb_start;
+               unsigned long fb_size;
+
+               /* Max runtime [ms] for jobs */
+               int max_job_runtime;
+
+               /* Interval, in ms, at which GPU utilization is reported and related control runs */
+               unsigned long control_interval;
+
+               /* Function that will receive periodic GPU utilization numbers */
+               void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+               /* Platform callback for frequency setting; needed when CONFIG_MALI_DVFS is enabled */
+               int (*set_freq)(int setting_clock_step);
+               /* Platform callback that reports the clock steps the driver may set; needed when CONFIG_MALI_DVFS is enabled */
+               void (*get_clock_info)(struct mali_gpu_clock **data);
+               /* Platform callback that returns the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+               int (*get_freq)(void);
+               /* Function that initializes the Mali GPU secure mode */
+               int (*secure_mode_init)(void);
+               /* Function that deinitializes the Mali GPU secure mode */
+               void (*secure_mode_deinit)(void);
+               /* Function that resets the GPU and enables secure mode */
+               int (*gpu_reset_and_secure_mode_enable)(void);
+               /* Function that resets the GPU and disables secure mode */
+               int (*gpu_reset_and_secure_mode_disable)(void);
+               /* IPA-related interface that the customer needs to register */
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+               struct devfreq_cooling_power *gpu_cooling_ops;
+#endif
+       };
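+
+       /*
+        * Initialization sketch for the structure above. Every value is a
+        * placeholder rather than a recommendation, and mali_gpu_utilization_handler
+        * is a hypothetical platform-defined function matching the
+        * utilization_callback prototype.
+        *
+        *   static struct mali_gpu_device_data mali_gpu_data = {
+        *           .shared_mem_size = 256 * 1024 * 1024,
+        *           .max_job_runtime = 60000,
+        *           .control_interval = 200,
+        *           .utilization_callback = mali_gpu_utilization_handler,
+        *   };
+        */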
+
+       /**
+        * Pause the scheduling and power state changes of the Mali device driver.
+        * mali_dev_resume() must always be called as soon as possible after this function
+        * in order to resume normal operation of the Mali driver.
+        */
+       void mali_dev_pause(void);
+
+       /**
+        * Resume scheduling and allow power changes in the Mali device driver.
+        * This must always be called after mali_dev_pause().
+        */
+       void mali_dev_resume(void);
+
+       /** @brief Set the desired number of PP cores to use.
+        *
+        * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+        *
+        * @param num_cores The number of desired cores
+        * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+        */
+       int mali_perf_set_num_pp_cores(unsigned int num_cores);
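+
+       /*
+        * Usage sketch (hypothetical call site): the pause/resume pair brackets
+        * work that requires the GPU to be idle, and the core-count setter
+        * reports failure through its return value.
+        *
+        *   mali_dev_pause();
+        *   ... platform work that must not race with GPU scheduling ...
+        *   mali_dev_resume();
+        *
+        *   if (mali_perf_set_num_pp_cores(2) != 0)
+        *           pr_warn("mali: failed to set the PP core count\n");
+        */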
+
+#endif
diff --git a/utgard/r8p0/include/linux/mali/mali_utgard_ioctl.h b/utgard/r8p0/include/linux/mali/mali_utgard_ioctl.h
new file mode 100755 (executable)
index 0000000..dfe152b
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library. 
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules 
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting 
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions 
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify 
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so. 
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>       /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_utgard_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE           0x82
+#define MALI_IOC_CORE_BASE      (_MALI_UK_CORE_SUBSYSTEM      + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE    (_MALI_UK_MEMORY_SUBSYSTEM    + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE        (_MALI_UK_PP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE        (_MALI_UK_GP_SUBSYSTEM        + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE     (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2         _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
+#define MALI_IOC_PENDING_SUBMIT             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_PENDING_SUBMIT, _mali_uk_pending_submit_s)
+
+#define MALI_IOC_MEM_ALLOC                  _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s)
+#define MALI_IOC_MEM_FREE                   _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s)
+#define MALI_IOC_MEM_BIND                   _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s)
+#define MALI_IOC_MEM_UNBIND                 _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s)
+#define MALI_IOC_MEM_COW                    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s)
+#define MALI_IOC_MEM_COW_MODIFY_RANGE       _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s)
+#define MALI_IOC_MEM_RESIZE                 _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_RESIZE_MEM, _mali_uk_mem_resize_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
+
+#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET     _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET        _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
+
+#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET       _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE       _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE, _mali_uk_gp_suspend_response_s)
+
+#define MALI_IOC_PROFILING_ADD_EVENT           _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET    _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
+#define MALI_IOC_PROFILING_STREAM_FD_GET       _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s)
+#define MALI_IOC_PROILING_CONTROL_SET          _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
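+
+/*
+ * Userspace sketch: querying the number of PP cores. "/dev/mali" is a
+ * hypothetical device-node path, and the output field name is assumed from
+ * the uk-types header rather than guaranteed here.
+ *
+ *   _mali_uk_get_pp_number_of_cores_s args = { 0 };
+ *   int fd = open("/dev/mali", O_RDWR);
+ *   if (fd >= 0) {
+ *           if (ioctl(fd, MALI_IOC_PP_NUMBER_OF_CORES_GET, &args) == 0)
+ *                   printf("PP cores: %u\n", args.number_of_total_cores);
+ *           close(fd);
+ *   }
+ */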
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/utgard/r8p0/include/linux/mali/mali_utgard_profiling_events.h b/utgard/r8p0/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100755 (executable)
index 0000000..f505e91
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library. 
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules 
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting 
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions 
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify 
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so. 
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32-bit value consisting of the following fields:
+ * - reserved, 4 bits, for future use
+ * - event type, 4 bits, cinstr_profiling_event_type_t
+ * - event channel, 8 bits, the source of the event
+ * - event data, 16 bits, data depending on the event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_TYPE_SINGLE  = 0 << 24,
+       MALI_PROFILING_EVENT_TYPE_START   = 1 << 24,
+       MALI_PROFILING_EVENT_TYPE_STOP    = 2 << 24,
+       MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+       MALI_PROFILING_EVENT_TYPE_RESUME  = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_CHANNEL_SOFTWARE =  0 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GP0      =  1 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP0      =  5 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP1      =  6 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP2      =  7 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP3      =  8 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP4      =  9 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP5      = 10 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP6      = 11 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_PP7      = 12 << 16,
+       MALI_PROFILING_EVENT_CHANNEL_GPU      = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
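+
+/*
+ * Composition sketch: per the bit layout described above, a "start on PP2"
+ * event ID can be built by OR-ing a type with a channel; the low 16 bits
+ * stay free for the event-type-specific data. Purely illustrative.
+ *
+ *   u32 event_id = MALI_PROFILING_EVENT_TYPE_START |
+ *                  MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(2);
+ */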
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE                  = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME         = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH                 = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT              = 4,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE            = 5,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE            = 6,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK              = 7,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK             = 8,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC        = 10,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC        = 11,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS   = 13,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK          = 53,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK              = 54,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK            = 55,
+       MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED           = 56,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP    = 57,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE  = 58,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC         = 59,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH       = 62,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ * to inform whether the core is physical or virtual
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL  = 0,
+       MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL   = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel
+ */
+typedef enum {
+       /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE            = 0,*/
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI            = 1,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD   = 3,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF     = 4,
+       MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF      = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE                     =  0, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL            =  1, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC                    = 26, /* used in some build configurations */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT           = 27, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC           = 28, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP   = 29, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE          = 30, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL       = 31, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS     = 32, /* used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE  = 33, /* NOT used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER         = 34, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER       = 35, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK                 = 36, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK          = 37, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP                 = 38, /* Not currently used */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING          = 40, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING          = 41, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE     = 42, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP       = 43,
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS   = 44,
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC            = 45, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT             = 46, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT         = 47, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT           = 48, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_SUBMIT_LIMITER_WAIT      = 49, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE          = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT     = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH         = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE              = 0,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE  = 1,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS      = 2,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS      = 3,
+       MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS      = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum {
+       MALI_PROFILING_EVENT_DATA_CORE_GP0             =  1,
+       MALI_PROFILING_EVENT_DATA_CORE_PP0             =  5,
+       MALI_PROFILING_EVENT_DATA_CORE_PP1             =  6,
+       MALI_PROFILING_EVENT_DATA_CORE_PP2             =  7,
+       MALI_PROFILING_EVENT_DATA_CORE_PP3             =  8,
+       MALI_PROFILING_EVENT_DATA_CORE_PP4             =  9,
+       MALI_PROFILING_EVENT_DATA_CORE_PP5             = 10,
+       MALI_PROFILING_EVENT_DATA_CORE_PP6             = 11,
+       MALI_PROFILING_EVENT_DATA_CORE_PP7             = 12,
+       MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU         = 22, /* GP0 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU         = 26, /* PP0 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU         = 27, /* PP1 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU         = 28, /* PP2 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU         = 29, /* PP3 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU         = 30, /* PP4 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU         = 31, /* PP5 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU         = 32, /* PP6 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU         = 33, /* PP7 + 21 */
+
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
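+
+/*
+ * Worked example of the helpers above: MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(2)
+ * evaluates to 26 + 2 = 28, i.e. MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU.
+ */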
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/utgard/r8p0/include/linux/mali/mali_utgard_profiling_gator_api.h b/utgard/r8p0/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100755 (executable)
index 0000000..c82d8b1
--- /dev/null
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2013, 2015-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library. 
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules 
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting 
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions 
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify 
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so. 
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \
+       {                                           \
+               "Filmstrip_cnt0",                 \
+               "Frequency",       \
+               "Voltage",       \
+               "vertex",     \
+               "fragment",         \
+               "Total_alloc_pages",        \
+       };
+
+#define _MALI_MEM_COUTNER_DESCRIPTIONS \
+       {                                           \
+               "untyped_memory",                 \
+               "vertex_index_buffer",       \
+               "texture_buffer",       \
+               "varying_buffer",     \
+               "render_target",         \
+               "pbuffer_buffer",        \
+               "plbu_heap",            \
+               "pointer_array_buffer",             \
+               "slave_tilelist",          \
+               "untyped_gp_cmdlist",     \
+               "polygon_cmdlist",               \
+               "texture_descriptor",               \
+               "render_state_word",               \
+               "shader",               \
+               "stream_buffer",               \
+               "fragment_stack",               \
+               "uniform",               \
+               "untyped_frame_pool",               \
+               "untyped_surface",               \
+       };
+
+/** The list of events supported by the Mali DDK. */
+typedef enum {
+       /* Vertex processor activity */
+       ACTIVITY_VP_0 = 0,
+
+       /* Fragment processor activity */
+       ACTIVITY_FP_0,
+       ACTIVITY_FP_1,
+       ACTIVITY_FP_2,
+       ACTIVITY_FP_3,
+       ACTIVITY_FP_4,
+       ACTIVITY_FP_5,
+       ACTIVITY_FP_6,
+       ACTIVITY_FP_7,
+
+       /* L2 cache counters */
+       COUNTER_L2_0_C0,
+       COUNTER_L2_0_C1,
+       COUNTER_L2_1_C0,
+       COUNTER_L2_1_C1,
+       COUNTER_L2_2_C0,
+       COUNTER_L2_2_C1,
+
+       /* Vertex processor counters */
+       COUNTER_VP_0_C0,
+       COUNTER_VP_0_C1,
+
+       /* Fragment processor counters */
+       COUNTER_FP_0_C0,
+       COUNTER_FP_0_C1,
+       COUNTER_FP_1_C0,
+       COUNTER_FP_1_C1,
+       COUNTER_FP_2_C0,
+       COUNTER_FP_2_C1,
+       COUNTER_FP_3_C0,
+       COUNTER_FP_3_C1,
+       COUNTER_FP_4_C0,
+       COUNTER_FP_4_C1,
+       COUNTER_FP_5_C0,
+       COUNTER_FP_5_C1,
+       COUNTER_FP_6_C0,
+       COUNTER_FP_6_C1,
+       COUNTER_FP_7_C0,
+       COUNTER_FP_7_C1,
+
+       /*
+        * If more hardware counters are added, the _mali_osk_hw_counter_table
+        * below should also be updated.
+        */
+
+       /* EGL software counters */
+       COUNTER_EGL_BLIT_TIME,
+
+       /* GLES software counters */
+       COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+       COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_ARRAYS_CALLS,
+       COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+       COUNTER_GLES_DRAW_POINTS,
+       COUNTER_GLES_DRAW_LINES,
+       COUNTER_GLES_DRAW_LINE_LOOP,
+       COUNTER_GLES_DRAW_LINE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLES,
+       COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+       COUNTER_GLES_DRAW_TRIANGLE_FAN,
+       COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+       COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+       COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+       COUNTER_GLES_UPLOAD_VBO_TIME,
+       COUNTER_GLES_NUM_FLUSHES,
+       COUNTER_GLES_NUM_VSHADERS_GENERATED,
+       COUNTER_GLES_NUM_FSHADERS_GENERATED,
+       COUNTER_GLES_VSHADER_GEN_TIME,
+       COUNTER_GLES_FSHADER_GEN_TIME,
+       COUNTER_GLES_INPUT_TRIANGLES,
+       COUNTER_GLES_VXCACHE_HIT,
+       COUNTER_GLES_VXCACHE_MISS,
+       COUNTER_GLES_VXCACHE_COLLISION,
+       COUNTER_GLES_CULLED_TRIANGLES,
+       COUNTER_GLES_CULLED_LINES,
+       COUNTER_GLES_BACKFACE_TRIANGLES,
+       COUNTER_GLES_GBCLIP_TRIANGLES,
+       COUNTER_GLES_GBCLIP_LINES,
+       COUNTER_GLES_TRIANGLES_DRAWN,
+       COUNTER_GLES_DRAWCALL_TIME,
+       COUNTER_GLES_TRIANGLES_COUNT,
+       COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+       COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+       COUNTER_GLES_FAN_TRIANGLES_COUNT,
+       COUNTER_GLES_LINES_COUNT,
+       COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+       COUNTER_GLES_STRIP_LINES_COUNT,
+       COUNTER_GLES_LOOP_LINES_COUNT,
+
+       /* Special counter */
+
+       /* Framebuffer capture pseudo-counter */
+       COUNTER_FILMSTRIP,
+       COUNTER_FREQUENCY,
+       COUNTER_VOLTAGE,
+       COUNTER_VP_ACTIVITY,
+       COUNTER_FP_ACTIVITY,
+       COUNTER_TOTAL_ALLOC_PAGES,
+
+       /* Memory usage counter */
+       COUNTER_MEM_UNTYPED,
+       COUNTER_MEM_VB_IB,
+       COUNTER_MEM_TEXTURE,
+       COUNTER_MEM_VARYING,
+       COUNTER_MEM_RT,
+       COUNTER_MEM_PBUFFER,
+       /* memory usages for gp command */
+       COUNTER_MEM_PLBU_HEAP,
+       COUNTER_MEM_POINTER_ARRAY,
+       COUNTER_MEM_SLAVE_TILELIST,
+       COUNTER_MEM_UNTYPE_GP_CMDLIST,
+       /* memory usages for polygon list command */
+       COUNTER_MEM_POLYGON_CMDLIST,
+       /* memory usages for pp command */
+       COUNTER_MEM_TD,
+       COUNTER_MEM_RSW,
+       /* other memory usages */
+       COUNTER_MEM_SHADER,
+       COUNTER_MEM_STREAMS,
+       COUNTER_MEM_FRAGMENT_STACK,
+       COUNTER_MEM_UNIFORM,
+       /* Special mem usage, which is used for mem pool allocation */
+       COUNTER_MEM_UNTYPE_MEM_POOL,
+       COUNTER_MEM_UNTYPE_SURFACE,
+
+       NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT    ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT     ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER        COUNTER_L2_0_C0
+#define LAST_HW_COUNTER         COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER        COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER         COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER   COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER    COUNTER_TOTAL_ALLOC_PAGES
+
+#define FIRST_MEM_COUNTER               COUNTER_MEM_UNTYPED
+#define LAST_MEM_COUNTER                COUNTER_MEM_UNTYPE_SURFACE
+
+#define MALI_PROFILING_MEM_COUNTERS_NUM (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1)
+#define MALI_PROFILING_SPECIAL_COUNTERS_NUM     (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1)
+#define MALI_PROFILING_SW_COUNTERS_NUM  (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1)
+
+/**
+ * Define the stream packet header types for the profiling stream.
+ */
+#define STREAM_HEADER_FRAMEBUFFER      0x05    /* The stream packet header type for framebuffer dumping. */
+#define STREAM_HEADER_COUNTER_VALUE    0x09    /* The stream packet header type for hw/sw/memory counter sampling. */
+#define STREAM_HEADER_CORE_ACTIVITY    0x0a    /* The stream packet header type for activity counter sampling. */
+#define STREAM_HEADER_SIZE             5
+
+/**
+ * Define the packet header types of the profiling control packets.
+ */
+#define PACKET_HEADER_ERROR                0x80 /* The response packet header type on error. */
+#define PACKET_HEADER_ACK                  0x81 /* The response packet header type on success. */
+#define PACKET_HEADER_COUNTERS_REQUEST     0x82 /* The control packet header type to request counter information from the DDK. */
+#define PACKET_HEADER_COUNTERS_ACK         0x83 /* The response packet header type to send out counter information. */
+#define PACKET_HEADER_COUNTERS_ENABLE      0x84 /* The control packet header type to enable counters. */
+#define PACKET_HEADER_START_CAPTURE_VALUE  0x85 /* The control packet header type to start capturing values. */
+
+#define PACKET_HEADER_SIZE      5
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters {
+       u32 source0;
+       u32 value0;
+       u32 source1;
+       u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values {
+       struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version {
+       u32 mali_product_id;
+       u32 mali_version_major;
+       u32 mali_version_minor;
+       u32 num_of_l2_cores;
+       u32 num_of_fp_cores;
+       u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/**
+ * Structure describing a Mali profiling counter.
+ */
+typedef struct mali_profiling_counter {
+       char counter_name[40];
+       u32 counter_id;
+       u32 counter_event;
+       u32 prev_counter_value;
+       u32 current_counter_value;
+       u32 key;
+       int enabled;
+} mali_profiling_counter;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define MEM_COUNTER_ENABLE (5)
+#define ANNOTATE_PROFILING_ENABLE (6)
+
+void _mali_profiling_control(u32 action, u32 value);
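+
+/*
+ * For example, gator could enable software counter reporting and set the
+ * framebuffer dump rate with (values are illustrative):
+ *
+ *   _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+ *   _mali_profiling_control(FBDUMP_CONTROL_RATE, 30);
+ */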
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/utgard/r8p0/include/linux/mali/mali_utgard_uk_types.h b/utgard/r8p0/include/linux/mali/mali_utgard_uk_types.h
new file mode 100755 (executable)
index 0000000..99835a5
--- /dev/null
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library. 
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules 
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting 
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions 
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify 
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so. 
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Iteration functions depend on these values being consecutive. */
+#define MALI_UK_TIMELINE_GP   0
+#define MALI_UK_TIMELINE_PP   1
+#define MALI_UK_TIMELINE_SOFT 2
+#define MALI_UK_TIMELINE_MAX  3
+
+#define MALI_UK_BIG_VARYING_SIZE  (1024*1024*2)
+
+typedef struct {
+       u32 points[MALI_UK_TIMELINE_MAX];
+       s32 sync_fd;
+} _mali_uk_fence_t;
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum {
+       _MALI_UK_CORE_SUBSYSTEM,      /**< Core Group of U/K calls */
+       _MALI_UK_MEMORY_SUBSYSTEM,    /**< Memory Group of U/K calls */
+       _MALI_UK_PP_SUBSYSTEM,        /**< Fragment Processor Group of U/K calls */
+       _MALI_UK_GP_SUBSYSTEM,        /**< Vertex Processor Group of U/K calls */
+       _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+       _MALI_UK_VSYNC_SUBSYSTEM,     /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref  _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum {
+       /** Core functions */
+
+       _MALI_UK_OPEN                    = 0, /**< _mali_ukk_open() */
+       _MALI_UK_CLOSE,                       /**< _mali_ukk_close() */
+       _MALI_UK_WAIT_FOR_NOTIFICATION,       /**< _mali_ukk_wait_for_notification() */
+       _MALI_UK_GET_API_VERSION,             /**< _mali_ukk_get_api_version() */
+       _MALI_UK_POST_NOTIFICATION,           /**< _mali_ukk_post_notification() */
+       _MALI_UK_GET_USER_SETTING,            /**< _mali_ukk_get_user_setting() *//**< [out] */
+       _MALI_UK_GET_USER_SETTINGS,           /**< _mali_ukk_get_user_settings() *//**< [out] */
+       _MALI_UK_REQUEST_HIGH_PRIORITY,       /**< _mali_ukk_request_high_priority() */
+       _MALI_UK_TIMELINE_GET_LATEST_POINT,   /**< _mali_ukk_timeline_get_latest_point() */
+       _MALI_UK_TIMELINE_WAIT,               /**< _mali_ukk_timeline_wait() */
+       _MALI_UK_TIMELINE_CREATE_SYNC_FENCE,  /**< _mali_ukk_timeline_create_sync_fence() */
+       _MALI_UK_SOFT_JOB_START,              /**< _mali_ukk_soft_job_start() */
+       _MALI_UK_SOFT_JOB_SIGNAL,             /**< _mali_ukk_soft_job_signal() */
+       _MALI_UK_PENDING_SUBMIT,             /**< _mali_ukk_pending_submit() */
+
+       /** Memory functions */
+
+       _MALI_UK_ALLOC_MEM                = 0,   /**< _mali_ukk_alloc_mem() */
+       _MALI_UK_FREE_MEM,                       /**< _mali_ukk_free_mem() */
+       _MALI_UK_BIND_MEM,                       /**< _mali_ukk_mem_bind() */
+       _MALI_UK_UNBIND_MEM,                     /**< _mali_ukk_mem_unbind() */
+       _MALI_UK_COW_MEM,                        /**< _mali_ukk_mem_cow() */
+       _MALI_UK_COW_MODIFY_RANGE,               /**< _mali_ukk_mem_cow_modify_range() */
+       _MALI_UK_RESIZE_MEM,                     /**< _mali_ukk_mem_resize() */
+       _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+       _MALI_UK_DUMP_MMU_PAGE_TABLE,            /**< _mali_ukk_mem_dump_mmu_page_table() */
+       _MALI_UK_DMA_BUF_GET_SIZE,               /**< _mali_ukk_dma_buf_get_size() */
+       _MALI_UK_MEM_WRITE_SAFE,                 /**< _mali_uku_mem_write_safe() */
+
+       /** Common functions for each core */
+
+       _MALI_UK_START_JOB           = 0,     /**< Start a Fragment/Vertex Processor Job on a core */
+       _MALI_UK_GET_NUMBER_OF_CORES,         /**< Get the number of Fragment/Vertex Processor cores */
+       _MALI_UK_GET_CORE_VERSION,            /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+       /** Fragment Processor Functions  */
+
+       _MALI_UK_PP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_pp_start_job() */
+       _MALI_UK_GET_PP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_pp_number_of_cores() */
+       _MALI_UK_GET_PP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_pp_core_version() */
+       _MALI_UK_PP_DISABLE_WB,                                           /**< _mali_ukk_pp_job_disable_wb() */
+       _MALI_UK_PP_AND_GP_START_JOB,                                     /**< _mali_ukk_pp_and_gp_start_job() */
+
+       /** Vertex Processor Functions  */
+
+       _MALI_UK_GP_START_JOB            = _MALI_UK_START_JOB,            /**< _mali_ukk_gp_start_job() */
+       _MALI_UK_GET_GP_NUMBER_OF_CORES  = _MALI_UK_GET_NUMBER_OF_CORES,  /**< _mali_ukk_get_gp_number_of_cores() */
+       _MALI_UK_GET_GP_CORE_VERSION     = _MALI_UK_GET_CORE_VERSION,     /**< _mali_ukk_get_gp_core_version() */
+       _MALI_UK_GP_SUSPEND_RESPONSE,                                     /**< _mali_ukk_gp_suspend_response() */
+
+       /** Profiling functions */
+
+       _MALI_UK_PROFILING_ADD_EVENT     = 0, /**< __mali_uku_profiling_add_event() */
+       _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+       _MALI_UK_PROFILING_MEMORY_USAGE_GET,  /**< __mali_uku_profiling_memory_usage_get() */
+       _MALI_UK_PROFILING_STREAM_FD_GET,     /**< __mali_uku_profiling_stream_fd_get() */
+       _MALI_UK_PROFILING_CONTROL_SET,       /**< __mali_uku_profiling_control_set() */
+
+       /** VSYNC reporting functions */
+       _MALI_UK_VSYNC_EVENT_REPORT      = 0, /**< _mali_ukk_vsync_event_report() */
+} _mali_uk_functions;
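+
+/*
+ * A (subsystem, function) pair can be folded into a single unique identifier
+ * in the same spirit as the notification encoding further below; a sketch
+ * (the macro name is illustrative, not part of this interface):
+ *
+ *   #define EXAMPLE_UK_CALL_ID(subsystem, function) \
+ *           (((u32)(subsystem) << 16) | (u32)(function))
+ *
+ *   // e.g. EXAMPLE_UK_CALL_ID(_MALI_UK_GP_SUBSYSTEM, _MALI_UK_GP_START_JOB)
+ */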
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code {
+       _MALIGP_JOB_ABORT,                  /**< Abort the Vertex Processor job */
+       _MALIGP_JOB_RESUME_WITH_NEW_HEAP    /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 cookie;                     /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+       _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+       u32 arguments[2];               /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
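+
+/*
+ * Minimal sketch of answering an out-of-memory suspension with a new heap,
+ * per the steps above ('notification' is a received
+ * _mali_uk_gp_job_suspended_s; heap addresses are placeholders):
+ *
+ *   _mali_uk_gp_suspend_response_s response = { 0 };
+ *
+ *   response.ctx          = ctx;                 // from _mali_ukk_open()
+ *   response.cookie       = notification.cookie; // identifies the suspended job
+ *   response.code         = _MALIGP_JOB_RESUME_WITH_NEW_HEAP;
+ *   response.arguments[0] = new_heap_start;      // Mali start address of the new heap
+ *   response.arguments[1] = new_heap_end;        // Mali end address of the new heap
+ */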
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job  */
+typedef enum {
+       _MALI_UK_JOB_STATUS_END_SUCCESS         = 1 << (16 + 0),
+       _MALI_UK_JOB_STATUS_END_OOM             = 1 << (16 + 1),
+       _MALI_UK_JOB_STATUS_END_ABORT           = 1 << (16 + 2),
+       _MALI_UK_JOB_STATUS_END_TIMEOUT_SW      = 1 << (16 + 3),
+       _MALI_UK_JOB_STATUS_END_HANG            = 1 << (16 + 4),
+       _MALI_UK_JOB_STATUS_END_SEG_FAULT       = 1 << (16 + 5),
+       _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB     = 1 << (16 + 6),
+       _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR     = 1 << (16 + 7),
+       _MALI_UK_JOB_STATUS_END_SHUTDOWN        = 1 << (16 + 8),
+       _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver is shutting down.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of
+ * _mali_ukk_gp_suspend_response(). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct {
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+       u32 priority;                       /**< [in] job priority. A lower number means higher priority */
+       u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+       u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+       u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+       u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+       u32 frame_builder_id;               /**< [in] id of the originating frame builder */
+       u32 flush_id;                       /**< [in] flush id within the originating frame builder */
+       _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
+       u64 timeline_point_ptr;            /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
+       u32 varying_memsize;            /**< [in] size of varying memory to use for deferred bind */
+       u32 deferred_mem_num;           /**< [in] number of entries in @c deferred_mem_list */
+       u64 deferred_mem_list;          /**< [in] memory handle list of varying buffers to use for deferred bind */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per tile (aka heatmap) generation with for a job (using the enabled counter sources) */
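+
+/*
+ * Abbreviated sketch of preparing a GP start job with both performance
+ * counters enabled, following the steps above (only the counter-related
+ * fields are shown; source ids are placeholders from ARM DDI0415A,
+ * Table 3-60):
+ *
+ *   _mali_uk_gp_start_job_s job = { 0 };
+ *
+ *   job.ctx               = ctx;
+ *   job.user_job_ptr      = (u64)(uintptr_t)my_gp_job_info;
+ *   job.perf_counter_flag = _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE |
+ *                           _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE;
+ *   job.perf_counter_src0 = src0_id;    // e.g. vertex loader cache hits
+ *   job.perf_counter_src1 = src1_id;
+ */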
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct {
+       u64 user_job_ptr;               /**< [out] identifier for the job in user space */
+       _mali_uk_job_status status;     /**< [out] status of finished job */
+       u32 heap_current_addr;          /**< [out] value of the GP PLB PL heap start address register */
+       u32 perf_counter0;              /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+       u32 perf_counter1;              /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+       u32 pending_big_job_num;
+} _mali_uk_gp_job_finished_s;
+
+typedef struct {
+       u64 user_job_ptr;                    /**< [out] identifier for the job in user space */
+       u32 cookie;                          /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1)
+#define _MALI_PP_JOB_FLAG_PROTECTED (1<<2)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver is shutting down.
+ *
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;               /**< [in] identifier for the job in user space */
+       u32 priority;                   /**< [in] job priority. A lower number means higher priority */
+       u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS];         /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+       u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+       u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+       u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];
+       u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
+       u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
+       u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+       u32 num_cores;                      /**< [in] Number of cores to set up (valid range: 1-8 on Mali-450, 1-4 on Mali-400) */
+       u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+       u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+       u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+       u32 frame_builder_id;               /**< [in] id of the originating frame builder */
+       u32 flush_id;                       /**< [in] flush id within the originating frame builder */
+       u32 flags;                          /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+       u32 tilesx;                         /**< [in] number of tiles in the x direction (needed for heatmap generation) */
+       u32 tilesy;                         /**< [in] number of tiles in the y direction (needed for reading the heatmap memory) */
+       u32 heatmap_mem;                    /**< [in] memory address to store counter values per tile (aka heatmap) */
+       u32 num_memory_cookies;             /**< [in] number of memory cookies attached to job */
+       u64 memory_cookies;               /**< [in] pointer to array of u32 memory cookies attached to job */
+       _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
+       u64 timeline_point_ptr;           /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
+} _mali_uk_pp_start_job_s;
+
+typedef struct {
+       u64 ctx;       /**< [in,out] user-kernel context (trashed on output) */
+       u64 gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+       u64 pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+} _mali_uk_pp_and_gp_start_job_s;
+
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct {
+       u64 user_job_ptr;                          /**< [out] identifier for the job in user space */
+       _mali_uk_job_status status;                /**< [out] status of finished job */
+       u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+       u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+       u32 perf_counter_src0;
+       u32 perf_counter_src1;
+} _mali_uk_pp_job_finished_s;
+
+typedef struct {
+       u32 number_of_enabled_cores;               /**< [out] the new number of enabled cores */
+} _mali_uk_pp_num_cores_changed_s;
+
+
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum {
+       _MALI_UK_PP_JOB_WB0 = 1,
+       _MALI_UK_PP_JOB_WB1 = 2,
+       _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 fb_id;                      /**< [in] Frame builder ID of job to disable WB units for */
+       u32 wb0_memory;
+       u32 wb1_memory;
+       u32 wb2_memory;
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+/** @defgroup _mali_uk_soft_job U/K Soft Job
+ * @{ */
+
+typedef struct {
+       u64 ctx;                            /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job;                       /**< [in] identifier for the job in user space */
+       u64 job_id_ptr;                     /**< [in,out] pointer to location of u32 where job id will be written */
+       _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
+       u32 point;                          /**< [out] point on soft timeline for this job */
+       u32 type;                           /**< [in] type of soft job */
+} _mali_uk_soft_job_start_s;
+
+typedef struct {
+       u64 user_job;                       /**< [out] identifier for the job in user space */
+} _mali_uk_soft_job_activated_s;
+
+typedef struct {
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u32 job_id;                         /**< [in] id for soft job */
+} _mali_uk_soft_job_signal_s;
+
+/** @} */ /* end group _mali_uk_soft_job */
+
+typedef struct {
+       u32 counter_id;
+       u32 key;
+       int enable;
+} _mali_uk_annotate_profiling_mem_counter_s;
+
+typedef struct {
+       u32 sampling_rate;
+       int enable;
+} _mali_uk_annotate_profiling_enable_s;
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ *            | (( id <<  _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum {
+       /** core notifications */
+
+       _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+       _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+       _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+       _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
+
+       /** Fragment Processor notifications */
+
+       _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+
+       /** Vertex Processor notifications */
+
+       _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+       /** Profiling notifications */
+       _MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting up a 32-bit notification value into subsystem and id values */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
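+
+/*
+ * Splitting a notification value with the masks above; a sketch (function
+ * names are illustrative):
+ *
+ *   static u32 example_subsystem(u32 type)
+ *   {
+ *           return (type & _MALI_NOTIFICATION_SUBSYSTEM_MASK) >> _MALI_NOTIFICATION_SUBSYSTEM_SHIFT;
+ *   }
+ *
+ *   static u32 example_id(u32 type)
+ *   {
+ *           return (type & _MALI_NOTIFICATION_ID_MASK) >> _MALI_NOTIFICATION_ID_SHIFT;
+ *   }
+ */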
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ */
+typedef enum {
+       _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+       _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+       _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+       _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+       _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+       {                                           \
+               "sw_events_enable",                 \
+               "colorbuffer_capture_enable",       \
+               "depthbuffer_capture_enable",       \
+               "stencilbuffer_capture_enable",     \
+               "per_tile_counters_enable",         \
+               "buffer_capture_compositor",        \
+               "buffer_capture_window",            \
+               "buffer_capture_other",             \
+               "buffer_capture_n_frames",          \
+               "buffer_capture_resize_factor",     \
+               "sw_counters_enable",               \
+       };
+
+/** @brief struct to hold the value of a particular setting as seen in the kernel space
+ */
+typedef struct {
+       _mali_uk_user_setting_t setting;
+       u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type in the @c type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ *     - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ *     - In this case, the value of the data union member is undefined.
+ *     - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ *    - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ *    - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ *    - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ *    - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ *    - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start on the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ *     - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ *     - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ *     - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_notification_type type; /**< [out] Type of notification available */
+       union {
+               _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+               _mali_uk_gp_job_finished_s  gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+               _mali_uk_pp_job_finished_s  pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+               _mali_uk_settings_changed_s setting_changed; /**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+               _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
+               _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter;
+               _mali_uk_annotate_profiling_enable_s profiling_enable;
+       } data;
+} _mali_uk_wait_for_notification_s;
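+
+/*
+ * Typical dispatch after a successful wait, as described above; a sketch
+ * ('args' is a filled-in _mali_uk_wait_for_notification_s, handlers are
+ * placeholders):
+ *
+ *   switch (args.type) {
+ *   case _MALI_NOTIFICATION_PP_FINISHED:
+ *           handle_pp_finished(&args.data.pp_job_finished);
+ *           break;
+ *   case _MALI_NOTIFICATION_GP_FINISHED:
+ *           handle_gp_finished(&args.data.gp_job_finished);
+ *           break;
+ *   case _MALI_NOTIFICATION_GP_STALLED:
+ *           respond_to_suspend(&args.data.gp_job_suspended);
+ *           break;
+ *   case _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS:
+ *           return;   // stop waiting and close the connection
+ *   default:
+ *           break;
+ *   }
+ */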
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API.
+ * The version is a 16-bit integer incremented on each API change.
+ * The 16-bit integer is stored twice in a 32-bit integer.
+ * For example, for version 1 the value would be 0x00010001.
+ */
+#define _MALI_API_VERSION 900
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
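+
+/*
+ * Worked example: with _MALI_API_VERSION == 900 (0x384),
+ * _MALI_UK_API_VERSION == _MAKE_VERSION_ID(900) == 0x03840384.
+ * _IS_VERSION_ID(0x03840384) holds because both 16-bit halves are equal,
+ * and _GET_VERSION(0x03840384) == 900.
+ */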
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+       u32 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
+       int compatible;                 /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+       u64 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
+       int compatible;                 /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
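+
+/*
+ * Sketch of the version handshake described above (error handling elided;
+ * uk_call() is a placeholder for the platform's U/K transport):
+ *
+ *   _mali_uk_get_api_version_v2_s args = { 0 };
+ *
+ *   args.ctx     = ctx;
+ *   args.version = _MALI_UK_API_VERSION;      // user-side version, encoded
+ *   uk_call(_MALI_UK_GET_API_VERSION, &args);
+ *   if (!args.compatible)
+ *           return -1;                        // interfaces do not match
+ */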
+
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are given reference to the context pointed to by the ctx pointer.
+ *
+ */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_user_setting_t setting; /**< [in] setting to get */
+       u32 value;                       /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @brief Arguments for _mali_ukk_request_high_priority() */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_request_high_priority_s;
+
+/** @brief Arguments for _mali_ukk_pending_submit() */
+typedef struct {
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_pending_submit_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+#define _MALI_MEMORY_ALLOCATE_RESIZEABLE  (1<<4) /* Buffer can trim down/grow. */
+#define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /* Not mapped to GPU at allocation; bind must be called later. */
+#define _MALI_MEMORY_ALLOCATE_SWAPPABLE   (1<<6) /* Allocate swappable memory. */
+#define _MALI_MEMORY_ALLOCATE_DEFER_BIND  (1<<7) /* Not mapped to GPU at allocation; bind must be called later. */
+#define _MALI_MEMORY_ALLOCATE_SECURE      (1<<8) /* Allocate secure memory. */
+
+
+typedef struct {
+       u64 ctx;                                          /**< [in,out] user-kernel context (trashed on output) */
+       u32 gpu_vaddr;                                    /**< [in] GPU virtual address */
+       u32 vsize;                                        /**< [in] virtual size of the allocation */
+       u32 psize;                                        /**< [in] physical size of the allocation */
+       u32 flags;
+       u64 backend_handle;                               /**< [out] backend handle */
+       s32 secure_shared_fd;                           /**< [in] the mem handle for secure mem */
+} _mali_uk_alloc_mem_s;
+
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 gpu_vaddr;                /**< [in] use as handle to free allocation */
+       u32 free_pages_nr;      /**< [out] number of pages freed */
+} _mali_uk_free_mem_s;
+
+
+#define _MALI_MEMORY_BIND_BACKEND_UMP             (1<<8)
+#define _MALI_MEMORY_BIND_BACKEND_DMA_BUF         (1<<9)
+#define _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY     (1<<10)
+#define _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY (1<<11)
+#define _MALI_MEMORY_BIND_BACKEND_EXT_COW         (1<<12)
+#define _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION (1<<13)
+
+
+#define _MALI_MEMORY_BIND_BACKEND_MASK (_MALI_MEMORY_BIND_BACKEND_UMP| \
+                                       _MALI_MEMORY_BIND_BACKEND_DMA_BUF |\
+                                       _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY |\
+                                       _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY |\
+                                       _MALI_MEMORY_BIND_BACKEND_EXT_COW |\
+                                       _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION)
+
+
+#define _MALI_MEMORY_GPU_READ_ALLOCATE            (1<<16)
+
+
+typedef struct {
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 vaddr;                                      /**< [in] mali address to map the physical memory to */
+       u32 size;                                       /**< [in] size */
+       u32 flags;                                      /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+       u32 padding;                                    /**< padding for 32/64-bit struct alignment */
+       union {
+               struct {
+                       u32 secure_id;                  /**< [in] secure id */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_ump;
+               struct {
+                       u32 mem_fd;                     /**< [in] Memory descriptor */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_dma_buf;
+               struct {
+                       u32 phys_addr;                  /**< [in] physical address */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_ext_memory;
+       } mem_union;
+} _mali_uk_bind_mem_s;
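+
+/*
+ * Sketch of binding a dma-buf backend (all values are placeholders):
+ *
+ *   _mali_uk_bind_mem_s args = { 0 };
+ *
+ *   args.ctx   = ctx;
+ *   args.vaddr = mali_vaddr;                        // target Mali address
+ *   args.size  = buffer_size;
+ *   args.flags = _MALI_MEMORY_BIND_BACKEND_DMA_BUF;
+ *   args.mem_union.bind_dma_buf.mem_fd = dmabuf_fd; // fd from the exporter
+ *   args.mem_union.bind_dma_buf.flags  = _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE;
+ */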
+
+typedef struct {
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 flags;                                      /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+       u32 vaddr;                                      /**<  [in] identifier for mapped memory object in kernel space  */
+} _mali_uk_unbind_mem_s;
+
+typedef struct {
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 target_handle;                              /**< [in] handle of the allocation to COW */
+       u32 target_offset;                              /**< [in] offset in the target allocation at which to COW (supports COW of memory allocated from a memory bank; PAGE_SIZE aligned) */
+       u32 target_size;                                /**< [in] size in bytes of the target allocation to COW (supports memory banks; PAGE_SIZE aligned) */
+       u32 range_start;                                /**< [in] start offset of the range to reallocate, relative to the start of the allocation (PAGE_SIZE aligned) */
+       u32 range_size;                                 /**< [in] size to reallocate (PAGE_SIZE aligned) */
+       u32 vaddr;                                      /**< [in] mali address for the new allocation */
+       u32 backend_handle;                             /**< [out] backend handle */
+       u32 flags;
+} _mali_uk_cow_mem_s;
+
+typedef struct {
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 range_start;                                /**< [in] start offset of the range to reallocate, relative to the start of the allocation */
+       u32 size;                                       /**< [in] size to reallocate */
+       u32 vaddr;                                      /**< [in] mali address for the new allocation */
+       s32 change_pages_nr;                            /**< [out] page count change from the COW operation */
+} _mali_uk_cow_modify_range_s;
+
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+
+typedef struct {
+       u64 ctx;                                /**< [in,out] user-kernel context (trashed on output) */
+       u64 vaddr;                              /**< [in] the buffer to resize */
+       u32 psize;                              /**< [in] wanted physical size of this memory */
+} _mali_uk_mem_resize_s;
+
+/**
+ * @brief Arguments for _mali_uk[uk]_mem_write_safe()
+ */
+typedef struct {
+       u64 ctx;  /**< [in,out] user-kernel context (trashed on output) */
+       u64 src;  /**< [in] Pointer to source data */
+       u64 dest; /**< [in] Destination Mali buffer */
+       u32 size;   /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 size;                       /**< [in] size of buffer to receive mmu page table information */
+       u64 buffer;                   /**< [in,out] buffer to receive mmu page table information */
+       u32 register_writes_size;       /**< [out] size of MMU register dump */
+       u64 register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
+       u32 page_table_dump_size;       /**< [out] size of MMU page table dump */
+       u64 page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 number_of_total_cores;      /**< [out] Total number of Fragment Processor cores in the system */
+       u32 number_of_enabled_cores;    /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version  */
+       u32 padding;
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 number_of_cores;            /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 event_id;                   /**< [in] event id to register (see  enum mali_profiling_events for values) */
+       u32 data[5];                    /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct {
+       u64 ctx;                     /**< [in,out] user-kernel context (trashed on output) */
+       u32 memory_usage;              /**< [out] total memory usage */
+       u32 vaddr;                                      /**< [in] mali address for the cow allocation */
+       s32 change_pages_nr;            /**< [out] page count change from the COW operation */
+} _mali_uk_profiling_memory_usage_get_s;
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or non-MMU:
+ * - in the non-MMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the  OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
+       u32 size;                       /**< [in] Size of the requested mapping */
+       u32 phys_addr;                  /**< [in] Physical address - could be offset, depending on caller+callee convention */
+       mali_bool writeable;            /**< [in] MALI_TRUE if the mapping should be writeable */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       void *mapping;                  /**< [in] The mapping returned from mmap call */
+       u32 size;                       /**< [in] The size passed to mmap call */
+} _mali_uk_mem_munmap_s;
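+
+/*
+ * The mmap/munmap pair must use matching values, as described above; a
+ * sketch (the actual transport call is elided):
+ *
+ *   _mali_uk_mem_mmap_s   map_args   = { 0 };
+ *   _mali_uk_mem_munmap_s unmap_args = { 0 };
+ *
+ *   map_args.ctx  = ctx;
+ *   map_args.size = size;
+ *   // ... perform the mmap call; map_args.mapping is filled in ...
+ *   unmap_args.ctx     = ctx;
+ *   unmap_args.mapping = map_args.mapping; // exactly as returned by mmap
+ *   unmap_args.size    = size;             // exactly the size passed to mmap
+ */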
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event {
+       _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+       _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event_report()
+ *
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_vsync_event event;     /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single render pass.
+ */
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 counters;                  /**< [in] The array of u32 counter values */
+       u32 num_counters;              /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
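+
+/*
+ * Sketch of reporting one render pass worth of software counters (the array
+ * and its length are caller-owned; NUM_SW_COUNTERS is a placeholder):
+ *
+ *   u32 sw_counters[NUM_SW_COUNTERS];   // one value per software counter
+ *
+ *   _mali_uk_sw_counters_report_s args = { 0 };
+ *   args.ctx          = ctx;
+ *   args.counters     = (u64)(uintptr_t)sw_counters;
+ *   args.num_counters = NUM_SW_COUNTERS;
+ */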
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_timeline U/K Mali Timeline
+ * @{ */
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 timeline;                   /**< [in] timeline id */
+       u32 point;                      /**< [out] latest point on timeline */
+} _mali_uk_timeline_get_latest_point_s;
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_fence_t fence;         /**< [in] fence */
+       u32 timeout;                    /**< [in] timeout (0 for no wait, -1 for blocking) */
+       u32 status;                     /**< [out] status of fence (1 if signaled, 0 if timeout) */
+} _mali_uk_timeline_wait_s;
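+
+/*
+ * Illustrative sketch (not part of the original header): a blocking wait on
+ * a mali fence using the timeout convention documented above (0 = poll,
+ * (u32)-1 = wait forever); the wait entry point name is assumed here:
+ *
+ *   _mali_uk_timeline_wait_s args = { .ctx = ctx, .fence = fence,
+ *                                     .timeout = (u32)-1 };
+ *   _mali_ukk_timeline_wait(&args);
+ *   if (1 == args.status)
+ *           ...the fence has signaled...
+ */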
+
+typedef struct {
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_fence_t fence;         /**< [in] mali fence to create linux sync fence from */
+       s32 sync_fd;                    /**< [out] file descriptor for new linux sync fence */
+} _mali_uk_timeline_create_sync_fence_s;
+
+/** @} */ /* end group _mali_uk_timeline */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+typedef struct {
+       u64 ctx;                 /**< [in,out] user-kernel context (trashed on output) */
+       s32 stream_fd;   /**< [in] The profiling kernel base stream fd handle */
+} _mali_uk_profiling_stream_fd_get_s;
+
+typedef struct {
+       u64 ctx;        /**< [in,out] user-kernel context (trashed on output) */
+       u64 control_packet_data; /**< [in] the control packet data for control settings */
+       u32 control_packet_size;  /**< [in] The control packet size */
+       u64 response_packet_data; /**< [out] The response packet data */
+       u32 response_packet_size; /**< [in,out] The response packet size */
+} _mali_uk_profiling_control_set_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/utgard/r8p0/linux/license/gpl/mali_kernel_license.h b/utgard/r8p0/linux/license/gpl/mali_kernel_license.h
new file mode 100755 (executable)
index 0000000..264555d
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE     "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/utgard/r8p0/linux/mali_devfreq.c b/utgard/r8p0/linux/mali_devfreq.c
new file mode 100644 (file)
index 0000000..0b0ba14
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux < 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp opp
+#define dev_pm_opp_get_voltage opp_get_voltage
+#define dev_pm_opp_get_opp_count opp_get_opp_count
+#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+#endif /* Linux >= 3.13 */
+
+#include "mali_pm_metrics.h"
+
+static int
+mali_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+       struct mali_device *mdev = dev_get_drvdata(dev);
+       struct dev_pm_opp *opp;
+       unsigned long freq = 0;
+       unsigned long voltage;
+       int err;
+
+       freq = *target_freq;
+
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, &freq, flags);
+       if (IS_ERR_OR_NULL(opp)) {
+               rcu_read_unlock();
+               MALI_PRINT_ERROR(("Failed to get opp (%ld)\n", PTR_ERR(opp)));
+               return PTR_ERR(opp);
+       }
+       voltage = dev_pm_opp_get_voltage(opp);
+       rcu_read_unlock();
+
+       MALI_DEBUG_PRINT(2, ("mali_devfreq_target:set_freq = %lld flags = 0x%x\n", freq, flags));
+       /*
+        * Only update if there is a change of frequency
+        */
+       if (mdev->current_freq == freq) {
+               *target_freq = freq;
+               mali_pm_reset_dvfs_utilisation(mdev);
+               return 0;
+       }
+
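+       /*
+        * Standard DVFS ordering: when scaling up, raise the voltage before
+        * the clock so the new frequency never runs undervolted; the matching
+        * block after clk_set_rate() lowers the voltage only once the clock
+        * has already been reduced.
+        */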
+#ifdef CONFIG_REGULATOR
+       if (mdev->regulator && mdev->current_voltage != voltage
+           && mdev->current_freq < freq) {
+               err = regulator_set_voltage(mdev->regulator, voltage, voltage);
+               if (err) {
+                       MALI_PRINT_ERROR(("Failed to increase voltage (%d)\n", err));
+                       return err;
+               }
+       }
+#endif
+
+       err = clk_set_rate(mdev->clock, freq);
+       if (err) {
+               MALI_PRINT_ERROR(("Failed to set clock %lu (target %lu)\n", freq, *target_freq));
+               return err;
+       }
+
+#ifdef CONFIG_REGULATOR
+       if (mdev->regulator && mdev->current_voltage != voltage
+           && mdev->current_freq > freq) {
+               err = regulator_set_voltage(mdev->regulator, voltage, voltage);
+               if (err) {
+                       MALI_PRINT_ERROR(("Failed to decrease voltage (%d)\n", err));
+                       return err;
+               }
+       }
+#endif
+
+       *target_freq = freq;
+       mdev->current_voltage = voltage;
+       mdev->current_freq = freq;
+
+       mali_pm_reset_dvfs_utilisation(mdev);
+
+       return err;
+}
+
+static int
+mali_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+       struct mali_device *mdev = dev_get_drvdata(dev);
+
+       *freq = mdev->current_freq;
+
+       MALI_DEBUG_PRINT(2, ("mali_devfreq_cur_freq: freq = %d \n", *freq));
+       return 0;
+}
+
+static int
+mali_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+       struct mali_device *mdev = dev_get_drvdata(dev);
+
+       stat->current_frequency = mdev->current_freq;
+
+       mali_pm_get_dvfs_utilisation(mdev,
+                                    &stat->total_time, &stat->busy_time);
+
+       stat->private_data = NULL;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+       memcpy(&mdev->devfreq->last_status, stat, sizeof(*stat));
+#endif
+
+       return 0;
+}
+
+/* Set up platform-specific OPPs in platform.c. */
+int __weak setup_opps(void)
+{
+       return 0;
+}
+
+/* Tear down platform-specific OPPs in platform.c. */
+int __weak term_opps(struct device *dev)
+{
+       return 0;
+}
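+
+/*
+ * Illustrative sketch (not part of the driver): a platform.c could override
+ * the weak hooks above to populate the OPP table. Assuming the platform code
+ * can reach the GPU's struct device (mali_gpu_dev below is hypothetical):
+ *
+ *   int setup_opps(void)
+ *   {
+ *           dev_pm_opp_add(mali_gpu_dev, 285000000, 1000000);
+ *           dev_pm_opp_add(mali_gpu_dev, 400000000, 1100000);
+ *           return 0;
+ *   }
+ */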
+
+static int mali_devfreq_init_freq_table(struct mali_device *mdev,
+                                       struct devfreq_dev_profile *dp)
+{
+       int err, count;
+       int i = 0;
+       unsigned long freq = 0;
+       struct dev_pm_opp *opp;
+
+       err = setup_opps();
+       if (err)
+               return err;
+
+       rcu_read_lock();
+       count = dev_pm_opp_get_opp_count(mdev->dev);
+       if (count < 0) {
+               rcu_read_unlock();
+               return count;
+       }
+       rcu_read_unlock();
+
+       MALI_DEBUG_PRINT(2, ("mali devfreq table count %d\n", count));
+
+       dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+                                      GFP_KERNEL);
+       if (!dp->freq_table)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       for (i = 0; i < count; i++, freq++) {
+               opp = dev_pm_opp_find_freq_ceil(mdev->dev, &freq);
+               if (IS_ERR(opp))
+                       break;
+
+               dp->freq_table[i] = freq;
+               MALI_DEBUG_PRINT(2, ("mali devfreq table array[%d] = %d\n", i, freq));
+       }
+       rcu_read_unlock();
+
+       if (count != i)
+               MALI_PRINT_ERROR(("Unable to enumerate all OPPs (%d!=%d)\n",
+                                 count, i));
+
+       dp->max_state = i;
+
+       return 0;
+}
+
+static void mali_devfreq_term_freq_table(struct mali_device *mdev)
+{
+       struct devfreq_dev_profile *dp = mdev->devfreq->profile;
+
+       kfree(dp->freq_table);
+       term_opps(mdev->dev);
+}
+
+static void mali_devfreq_exit(struct device *dev)
+{
+       struct mali_device *mdev = dev_get_drvdata(dev);
+
+       mali_devfreq_term_freq_table(mdev);
+}
+
+int mali_devfreq_init(struct mali_device *mdev)
+{
+#ifdef CONFIG_DEVFREQ_THERMAL
+       struct devfreq_cooling_power *callbacks = NULL;
+       _mali_osk_device_data data;
+#endif
+       struct devfreq_dev_profile *dp;
+       int err;
+
+       MALI_DEBUG_PRINT(2, ("Init Mali devfreq\n"));
+
+       if (!mdev->clock)
+               return -ENODEV;
+
+       mdev->current_freq = clk_get_rate(mdev->clock);
+
+       dp = &mdev->devfreq_profile;
+
+       dp->initial_freq = mdev->current_freq;
+       dp->polling_ms = 100;
+       dp->target = mali_devfreq_target;
+       dp->get_dev_status = mali_devfreq_status;
+       dp->get_cur_freq = mali_devfreq_cur_freq;
+       dp->exit = mali_devfreq_exit;
+
+       if (mali_devfreq_init_freq_table(mdev, dp))
+               return -EFAULT;
+
+       mdev->devfreq = devfreq_add_device(mdev->dev, dp,
+                                          "simple_ondemand", NULL);
+       if (IS_ERR(mdev->devfreq)) {
+               mali_devfreq_term_freq_table(mdev);
+               return PTR_ERR(mdev->devfreq);
+       }
+
+       err = devfreq_register_opp_notifier(mdev->dev, mdev->devfreq);
+       if (err) {
+               MALI_PRINT_ERROR(("Failed to register OPP notifier (%d)\n", err));
+               goto opp_notifier_failed;
+       }
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+       /* Initialize last_status; it is used by the power allocator on its first call. */
+       mdev->devfreq->last_status.current_frequency = mdev->current_freq;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               if (NULL != data.gpu_cooling_ops) {
+                       callbacks = data.gpu_cooling_ops;
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Thermal: Callback handler installed \n"));
+               }
+       }
+
+       if (callbacks) {
+               mdev->devfreq_cooling = of_devfreq_cooling_register_power(
+                                               mdev->dev->of_node,
+                                               mdev->devfreq,
+                                               callbacks);
+               if (IS_ERR_OR_NULL(mdev->devfreq_cooling)) {
+                       err = PTR_ERR(mdev->devfreq_cooling);
+                       MALI_PRINT_ERROR(("Failed to register cooling device (%d)\n", err));
+                       goto cooling_failed;
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Thermal Cooling installed \n"));
+               }
+       }
+#endif
+
+       return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+cooling_failed:
+       devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+       err = devfreq_remove_device(mdev->devfreq);
+       if (err)
+               MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err));
+       else
+               mdev->devfreq = NULL;
+
+       return err;
+}
+
+void mali_devfreq_term(struct mali_device *mdev)
+{
+       int err;
+
+       MALI_DEBUG_PRINT(2, ("Term Mali devfreq\n"));
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+       devfreq_cooling_unregister(mdev->devfreq_cooling);
+#endif
+
+       devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq);
+
+       err = devfreq_remove_device(mdev->devfreq);
+       if (err)
+               MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err));
+       else
+               mdev->devfreq = NULL;
+}
diff --git a/utgard/r8p0/linux/mali_devfreq.h b/utgard/r8p0/linux/mali_devfreq.h
new file mode 100644 (file)
index 0000000..faf84f2
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef _MALI_DEVFREQ_H_
+#define _MALI_DEVFREQ_H_
+
+struct mali_device;
+
+int mali_devfreq_init(struct mali_device *mdev);
+
+void mali_devfreq_term(struct mali_device *mdev);
+
+#endif
diff --git a/utgard/r8p0/linux/mali_device_pause_resume.c b/utgard/r8p0/linux/mali_device_pause_resume.c
new file mode 100755 (executable)
index 0000000..cb2f702
--- /dev/null
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+
+void mali_dev_pause(void)
+{
+       /*
+        * Deactivate all groups to prevent the hardware from being touched
+        * while the Mali device is paused
+        */
+       mali_pm_os_suspend(MALI_FALSE);
+}
+EXPORT_SYMBOL(mali_dev_pause);
+
+void mali_dev_resume(void)
+{
+       mali_pm_os_resume();
+}
+EXPORT_SYMBOL(mali_dev_resume);
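+
+/*
+ * Illustrative sketch (not part of the driver): an external driver brackets
+ * work that must not race with the GPU, e.g. reparenting the GPU clock,
+ * with the exported pair:
+ *
+ *   mali_dev_pause();
+ *   ...change clock/regulator state...
+ *   mali_dev_resume();
+ */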
diff --git a/utgard/r8p0/linux/mali_dma_fence.c b/utgard/r8p0/linux/mali_dma_fence.c
new file mode 100755 (executable)
index 0000000..7db30b8
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include "mali_dma_fence.h"
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#endif
+
+static DEFINE_SPINLOCK(mali_dma_fence_lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_dma_fence_enable_signaling(struct dma_fence *fence)
+{
+       MALI_IGNORE(fence);
+       return true;
+}
+
+static const char *mali_dma_fence_get_driver_name(struct dma_fence *fence)
+{
+       MALI_IGNORE(fence);
+       return "mali";
+}
+
+static const char *mali_dma_fence_get_timeline_name(struct dma_fence *fence)
+{
+       MALI_IGNORE(fence);
+       return "mali_dma_fence";
+}
+
+static const struct dma_fence_ops mali_dma_fence_ops = {
+       .get_driver_name = mali_dma_fence_get_driver_name,
+       .get_timeline_name = mali_dma_fence_get_timeline_name,
+       .enable_signaling = mali_dma_fence_enable_signaling,
+       .signaled = NULL,
+       .wait = dma_fence_default_wait,
+       .release = NULL
+};
+#else
+static bool mali_dma_fence_enable_signaling(struct fence *fence)
+{
+       MALI_IGNORE(fence);
+       return true;
+}
+
+static const char *mali_dma_fence_get_driver_name(struct fence *fence)
+{
+       MALI_IGNORE(fence);
+       return "mali";
+}
+
+static const char *mali_dma_fence_get_timeline_name(struct fence *fence)
+{
+       MALI_IGNORE(fence);
+       return "mali_dma_fence";
+}
+
+static const struct fence_ops mali_dma_fence_ops = {
+       .get_driver_name = mali_dma_fence_get_driver_name,
+       .get_timeline_name = mali_dma_fence_get_timeline_name,
+       .enable_signaling = mali_dma_fence_enable_signaling,
+       .signaled = NULL,
+       .wait = fence_default_wait,
+       .release = NULL
+};
+#endif
+
+static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+       for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) {
+               if (dma_fence_context->mali_dma_fence_waiters[i]) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+                       dma_fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
+                                                 &dma_fence_context->mali_dma_fence_waiters[i]->base);
+                       dma_fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+
+#else
+                       fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
+                                             &dma_fence_context->mali_dma_fence_waiters[i]->base);
+                       fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+#endif
+                       kfree(dma_fence_context->mali_dma_fence_waiters[i]);
+                       dma_fence_context->mali_dma_fence_waiters[i] = NULL;
+               }
+       }
+
+       kfree(dma_fence_context->mali_dma_fence_waiters);
+
+       dma_fence_context->mali_dma_fence_waiters = NULL;
+       dma_fence_context->num_dma_fence_waiter = 0;
+}
+
+static void mali_dma_fence_context_work_func(struct work_struct *work_handle)
+{
+       struct mali_dma_fence_context *dma_fence_context;
+
+       MALI_DEBUG_ASSERT_POINTER(work_handle);
+
+       dma_fence_context = container_of(work_handle, struct mali_dma_fence_context, work_handle);
+
+       dma_fence_context->cb_func(dma_fence_context->pp_job_ptr);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_dma_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+#else
+static void mali_dma_fence_callback(struct fence *fence, struct fence_cb *cb)
+#endif
+{
+       struct mali_dma_fence_waiter *dma_fence_waiter = NULL;
+       struct mali_dma_fence_context *dma_fence_context = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       MALI_DEBUG_ASSERT_POINTER(cb);
+
+       MALI_IGNORE(fence);
+
+       dma_fence_waiter = container_of(cb, struct mali_dma_fence_waiter, base);
+       dma_fence_context = dma_fence_waiter->parent;
+
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+       if (atomic_dec_and_test(&dma_fence_context->count))
+               schedule_work(&dma_fence_context->work_handle);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct dma_fence *fence)
+#else
+static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct fence *fence)
+#endif
+{
+       int ret = 0;
+       struct mali_dma_fence_waiter *dma_fence_waiter;
+       struct mali_dma_fence_waiter **dma_fence_waiters;
+
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       dma_fence_waiters = krealloc(dma_fence_context->mali_dma_fence_waiters,
+                                    (dma_fence_context->num_dma_fence_waiter + 1)
+                                    * sizeof(struct mali_dma_fence_waiter *),
+                                    GFP_KERNEL);
+
+       if (NULL == dma_fence_waiters) {
+               MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to realloc the dma fence waiters.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       dma_fence_context->mali_dma_fence_waiters = dma_fence_waiters;
+
+       dma_fence_waiter = kzalloc(sizeof(struct mali_dma_fence_waiter), GFP_KERNEL);
+
+       if (NULL == dma_fence_waiter) {
+               MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create mali dma fence waiter.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       dma_fence_get(fence);
+#else
+       fence_get(fence);
+#endif
+       dma_fence_waiter->fence = fence;
+       dma_fence_waiter->parent = dma_fence_context;
+       atomic_inc(&dma_fence_context->count);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       ret = dma_fence_add_callback(fence, &dma_fence_waiter->base,
+                                mali_dma_fence_callback);
+#else
+       ret = fence_add_callback(fence, &dma_fence_waiter->base,
+                                mali_dma_fence_callback);
+#endif
+       if (0 > ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+               dma_fence_put(fence);
+#else
+               fence_put(fence);
+#endif
+               kfree(dma_fence_waiter);
+               atomic_dec(&dma_fence_context->count);
+               if (-ENOENT == ret) {
+                       /*-ENOENT if fence has already been signaled, return _MALI_OSK_ERR_OK*/
+                       return _MALI_OSK_ERR_OK;
+               }
+               /* Failed to add the fence callback into fence, return _MALI_OSK_ERR_FAULT*/
+               MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into fence.\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       dma_fence_context->mali_dma_fence_waiters[dma_fence_context->num_dma_fence_waiter] = dma_fence_waiter;
+       dma_fence_context->num_dma_fence_waiter++;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno)
+#else
+struct fence *mali_dma_fence_new(u32 context, u32 seqno)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       struct dma_fence *fence = NULL;
+       fence = kzalloc(sizeof(struct dma_fence), GFP_KERNEL);
+#else
+       struct fence *fence = NULL;
+       fence = kzalloc(sizeof(struct fence), GFP_KERNEL);
+#endif
+       if (NULL == fence) {
+               MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n"));
+               return fence;
+       }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       dma_fence_init(fence,
+                  &mali_dma_fence_ops,
+                  &mali_dma_fence_lock,
+                  context, seqno);
+#else
+       fence_init(fence,
+                  &mali_dma_fence_ops,
+                  &mali_dma_fence_lock,
+                  context, seqno);
+#endif
+       return fence;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+void mali_dma_fence_signal_and_put(struct dma_fence **fence)
+#else
+void mali_dma_fence_signal_and_put(struct fence **fence)
+#endif
+{
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       MALI_DEBUG_ASSERT_POINTER(*fence);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       dma_fence_signal(*fence);
+       dma_fence_put(*fence);
+#else
+       fence_signal(*fence);
+       fence_put(*fence);
+#endif
+       *fence = NULL;
+}
+
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+                                mali_dma_fence_context_callback_func_t  cb_func,
+                                void *pp_job_ptr)
+{
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+       INIT_WORK(&dma_fence_context->work_handle, mali_dma_fence_context_work_func);
+       atomic_set(&dma_fence_context->count, 1);
+       dma_fence_context->num_dma_fence_waiter = 0;
+       dma_fence_context->mali_dma_fence_waiters = NULL;
+       dma_fence_context->cb_func = cb_func;
+       dma_fence_context->pp_job_ptr = pp_job_ptr;
+}
+
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+               struct reservation_object *dma_reservation_object)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+       u32 shared_count = 0, i;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       struct dma_fence *exclusive_fence = NULL;
+       struct dma_fence **shared_fences = NULL;
+#else
+       struct fence *exclusive_fence = NULL;
+       struct fence **shared_fences = NULL;
+#endif
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+       MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+
+       /* Get all the shared/exclusive fences in the reservation object of dma buf*/
+       ret = reservation_object_get_fences_rcu(dma_reservation_object, &exclusive_fence,
+                                               &shared_count, &shared_fences);
+       if (ret < 0) {
+               MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to get  shared or exclusive_fence dma fences from  the reservation object of dma buf.\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (exclusive_fence) {
+               ret = mali_dma_fence_add_callback(dma_fence_context, exclusive_fence);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into exclusive fence.\n"));
+                       mali_dma_fence_context_cleanup(dma_fence_context);
+                       goto ended;
+               }
+       }
+
+
+       for (i = 0; i < shared_count; i++) {
+               ret = mali_dma_fence_add_callback(dma_fence_context, shared_fences[i]);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into shared fence [%d].\n", i));
+                       mali_dma_fence_context_cleanup(dma_fence_context);
+                       break;
+               }
+       }
+
+ended:
+
+       if (exclusive_fence)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+               dma_fence_put(exclusive_fence);
+#else
+               fence_put(exclusive_fence);
+#endif
+
+       if (shared_fences) {
+               for (i = 0; i < shared_count; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+                       dma_fence_put(shared_fences[i]);
+#else
+                       fence_put(shared_fences[i]);
+#endif
+               }
+               kfree(shared_fences);
+       }
+
+       return ret;
+}
+
+
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context)
+{
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+       atomic_set(&dma_fence_context->count, 0);
+       if (dma_fence_context->work_handle.func) {
+               cancel_work_sync(&dma_fence_context->work_handle);
+       }
+       mali_dma_fence_context_cleanup(dma_fence_context);
+}
+
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context)
+{
+       MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+       if (atomic_dec_and_test(&dma_fence_context->count))
+               schedule_work(&dma_fence_context->work_handle);
+}
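+
+/*
+ * Illustrative sketch (not part of the driver): typical lifecycle of a dma
+ * fence context for a PP job. The callback runs from the work queue once the
+ * initial count plus every added waiter has been released (names other than
+ * the mali_dma_fence_* calls are hypothetical):
+ *
+ *   mali_dma_fence_context_init(&ctx, job_ready_callback, job);
+ *   mali_dma_fence_context_add_waiters(&ctx, dma_buf->resv);
+ *   mali_dma_fence_context_dec_count(&ctx);   <-- drops the initial count
+ *   ...job_ready_callback(job) fires once all fences have signaled...
+ *   mali_dma_fence_context_term(&ctx);
+ */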
+
+
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+               struct reservation_object **dma_reservation_object_list,
+               u32 *num_dma_reservation_object)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+       MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+       MALI_DEBUG_ASSERT_POINTER(num_dma_reservation_object);
+
+       for (i = 0; i < *num_dma_reservation_object; i++) {
+               if (dma_reservation_object_list[i] == dma_reservation_object)
+                       return;
+       }
+
+       dma_reservation_object_list[*num_dma_reservation_object] = dma_reservation_object;
+       (*num_dma_reservation_object)++;
+}
+
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+               u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+       u32 i;
+
+       struct reservation_object *reservation_object_to_slow_lock = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+       MALI_DEBUG_ASSERT_POINTER(ww_actx);
+
+       ww_acquire_init(ww_actx, &reservation_ww_class);
+
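+       /*
+        * Wait/wound backoff: if ww_mutex_lock() returns -EDEADLK below, this
+        * context has been "wounded". Drop every lock taken so far, acquire
+        * the contended object with ww_mutex_lock_slow() (which waits until
+        * it is free), then retry the whole list from here.
+        */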
+again:
+       for (i = 0; i < num_dma_reservation_object; i++) {
+               int ret;
+
+               if (dma_reservation_object_list[i] == reservation_object_to_slow_lock) {
+                       reservation_object_to_slow_lock = NULL;
+                       continue;
+               }
+
+               ret = ww_mutex_lock(&dma_reservation_object_list[i]->lock, ww_actx);
+
+               if (ret < 0) {
+                       u32  slow_lock_index = i;
+
+                       /* unlock all pre locks we have already locked.*/
+                       while (i > 0) {
+                               i--;
+                               ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
+                       }
+
+                       if (NULL != reservation_object_to_slow_lock)
+                               ww_mutex_unlock(&reservation_object_to_slow_lock->lock);
+
+                       if (ret == -EDEADLK) {
+                               reservation_object_to_slow_lock = dma_reservation_object_list[slow_lock_index];
+                               ww_mutex_lock_slow(&reservation_object_to_slow_lock->lock, ww_actx);
+                               goto again;
+                       }
+                       ww_acquire_fini(ww_actx);
+                       MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to lock all dma reservation objects.\n", i));
+                       return ret;
+               }
+       }
+
+       ww_acquire_done(ww_actx);
+       return 0;
+}
+
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+               u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+       u32 i;
+
+       for (i = 0; i < num_dma_reservation_object; i++)
+               ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
+
+       ww_acquire_fini(ww_actx);
+}
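+
+/*
+ * Illustrative sketch (not part of the driver): a caller collects the
+ * deduplicated reservation objects of every dma-buf a job touches, locks
+ * them all atomically, installs its fences/waiters, then unlocks:
+ *
+ *   struct ww_acquire_ctx ww_ctx;
+ *   u32 num = 0;
+ *
+ *   ...for each dma-buf "buf" in the job...
+ *           mali_dma_fence_add_reservation_object_list(buf->resv, list, &num);
+ *
+ *   if (0 == mali_dma_fence_lock_reservation_object_list(list, num, &ww_ctx)) {
+ *           ...add waiters / install fences...
+ *           mali_dma_fence_unlock_reservation_object_list(list, num, &ww_ctx);
+ *   }
+ */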
diff --git a/utgard/r8p0/linux/mali_dma_fence.h b/utgard/r8p0/linux/mali_dma_fence.h
new file mode 100755 (executable)
index 0000000..112fdd1
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_dma_fence.h
+ *
+ * Mali interface for Linux dma buf fence objects.
+ */
+
+#ifndef _MALI_DMA_FENCE_H_
+#define _MALI_DMA_FENCE_H_
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
+#include <linux/fence.h>
+#endif
+#include <linux/reservation.h>
+#endif
+
+struct mali_dma_fence_context;
+
+/* The mali dma fence context callback function */
+typedef void (*mali_dma_fence_context_callback_func_t)(void *pp_job_ptr);
+
+struct mali_dma_fence_waiter {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       struct dma_fence *fence;
+       struct dma_fence_cb base;
+#else
+       struct fence_cb base;
+       struct fence *fence;
+#endif
+       struct mali_dma_fence_context *parent;
+};
+
+struct mali_dma_fence_context {
+       struct work_struct work_handle;
+       struct mali_dma_fence_waiter **mali_dma_fence_waiters;
+       u32 num_dma_fence_waiter;
+       atomic_t count;
+       void *pp_job_ptr; /* the mali pp job pointer */
+       mali_dma_fence_context_callback_func_t cb_func;
+};
+
+/* Create a dma fence
+ * @param context The execution context this fence is run on
+ * @param seqno A linearly increasing sequence number for this context
+ * @return the new dma fence if success, or NULL on failure.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno);
+#else
+struct fence *mali_dma_fence_new(u32 context, u32 seqno);
+#endif
+/* Signal and put dma fence
+ * @param fence The dma fence to signal and put
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+void mali_dma_fence_signal_and_put(struct dma_fence **fence);
+#else
+void mali_dma_fence_signal_and_put(struct fence **fence);
+#endif
+/**
+ * Initialize a mali dma fence context for pp job.
+ * @param dma_fence_context The mali dma fence context to initialize.
+ * @param cb_func The dma fence context callback function to call when all dma fences have been released.
+ * @param pp_job_ptr The pp_job to call function with.
+ */
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+                                mali_dma_fence_context_callback_func_t  cb_func,
+                                void *pp_job_ptr);
+
+/**
+ * Add new mali dma fence waiter into mali dma fence context
+ * @param dma_fence_context The mali dma fence context
+ * @param dma_reservation_object the reservation object to create new mali dma fence waiters
+ * @return _MALI_OSK_ERR_OK on success, or a suitable _mali_osk_errcode_t error otherwise.
+ */
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+               struct reservation_object *dma_reservation_object);
+
+/**
+ * Release the dma fence context
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Decrease the dma fence context atomic count
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Add a reservation object into the reservation object list, skipping duplicates
+ * @param dma_reservation_object The reservation object to add into the reservation object list
+ * @param dma_reservation_object_list The reservation object list storing all reservation objects
+ * @param num_dma_reservation_object The current number of reservation objects in the list
+ */
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+               struct reservation_object **dma_reservation_object_list,
+               u32 *num_dma_reservation_object);
+
+/**
+ * Wait/wound mutex lock to lock all reservation object.
+ */
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+               u32  num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
+
+/**
+ * Wait/wound mutex lock to unlock all reservation object.
+ */
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+               u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
+#endif
diff --git a/utgard/r8p0/linux/mali_internal_sync.c b/utgard/r8p0/linux/mali_internal_sync.c
new file mode 100644 (file)
index 0000000..be8a53e
--- /dev/null
@@ -0,0 +1,781 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_internal_sync.h"
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#include <linux/ioctl.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#if defined(DEBUG)
+#include "mali_session.h"
+#include "mali_timeline.h"
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const struct dma_fence_ops fence_ops;
+#else
+static const struct fence_ops fence_ops;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct dma_fence *fence)
+#else
+static struct mali_internal_sync_point *mali_internal_fence_to_sync_pt(struct fence *fence)
+#endif
+{
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       return container_of(fence, struct mali_internal_sync_point, base);
+}
+
+static inline struct mali_internal_sync_timeline *mali_internal_sync_pt_to_sync_timeline(struct mali_internal_sync_point *sync_pt)
+{
+       MALI_DEBUG_ASSERT_POINTER(sync_pt);
+       return container_of(sync_pt->base.lock, struct mali_internal_sync_timeline, sync_pt_list_lock);
+}
+
+static void mali_internal_sync_timeline_free(struct kref *kref_count)
+{
+       struct mali_internal_sync_timeline *sync_timeline;
+
+       MALI_DEBUG_ASSERT_POINTER(kref_count);
+
+       sync_timeline = container_of(kref_count, struct mali_internal_sync_timeline, kref_count);
+
+       if (sync_timeline->ops->release_obj)
+               sync_timeline->ops->release_obj(sync_timeline);
+
+       kfree(sync_timeline);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_check_cb_func(struct fence *fence, struct fence_cb *cb)
+#else
+static void mali_internal_fence_check_cb_func(struct dma_fence *fence, struct dma_fence_cb *cb)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+       struct mali_internal_sync_fence_cb *check;
+#else
+       struct mali_internal_sync_fence_waiter *waiter;
+#endif
+       struct mali_internal_sync_fence *sync_fence;
+       int ret;
+       MALI_DEBUG_ASSERT_POINTER(cb);
+       MALI_IGNORE(fence);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+       check = container_of(cb, struct mali_internal_sync_fence_cb, cb);
+       sync_fence = check->sync_file;
+#else
+       waiter = container_of(cb, struct mali_internal_sync_fence_waiter, cb);
+       sync_fence = (struct mali_internal_sync_fence *)waiter->work.private;
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+       ret = atomic_dec_and_test(&sync_fence->status);
+       if (ret)
+               wake_up_all(&sync_fence->wq);
+#else
+       ret = sync_fence->fence->ops->signaled(sync_fence->fence);
+
+#ifdef DEBUG
+       if (0 > ret)
+               trace_printk("Mali internal sync:fence signaled? ret=%d, fence  0x%p for sync_fence 0x%p.\n", ret, fence, sync_fence);
+#endif
+
+       if (1 == ret)
+               wake_up_all(&sync_fence->wq);
+#endif
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+static void mali_internal_sync_fence_add_fence(struct mali_internal_sync_fence *sync_fence, struct fence *sync_pt)
+{
+       int fence_num = 0;
+       MALI_DEBUG_ASSERT_POINTER(sync_fence);
+       MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+       fence_num = sync_fence->num_fences;
+
+       sync_fence->cbs[fence_num].fence = sync_pt;
+       sync_fence->cbs[fence_num].sync_file = sync_fence;
+
+       if (!fence_add_callback(sync_pt, &sync_fence->cbs[fence_num].cb, mali_internal_fence_check_cb_func)) {
+               fence_get(sync_pt);
+               sync_fence->num_fences++;
+               atomic_inc(&sync_fence->status);
+       }
+}
+#endif
+
+static int mali_internal_sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+               int wake_flags, void *key)
+{
+       struct mali_internal_sync_fence_waiter *wait;
+       MALI_IGNORE(mode);
+       MALI_IGNORE(wake_flags);
+       MALI_IGNORE(key);
+
+       wait = container_of(curr, struct mali_internal_sync_fence_waiter, work);
+       list_del_init(&wait->work.task_list);
+
+       wait->callback(wait->work.private, wait);
+       return 1;
+}
+
+struct mali_internal_sync_timeline *mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops *ops,
+               int size, const char *name)
+{
+       struct mali_internal_sync_timeline *sync_timeline = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(ops);
+
+       if (size < sizeof(struct mali_internal_sync_timeline)) {
+               MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync timeline.\n"));
+               goto err;
+       }
+
+       sync_timeline = kzalloc(size, GFP_KERNEL);
+       if (NULL == sync_timeline) {
+               MALI_PRINT_ERROR(("Mali internal sync:Failed to  allocate buffer  for the mali internal sync timeline.\n"));
+               goto err;
+       }
+       kref_init(&sync_timeline->kref_count);
+       sync_timeline->ops = ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       sync_timeline->fence_context = dma_fence_context_alloc(1);
+#else
+       sync_timeline->fence_context = fence_context_alloc(1);
+#endif
+       strlcpy(sync_timeline->name, name, sizeof(sync_timeline->name));
+
+       INIT_LIST_HEAD(&sync_timeline->sync_pt_list_head);
+       spin_lock_init(&sync_timeline->sync_pt_list_lock);
+
+       return sync_timeline;
+err:
+       if (NULL != sync_timeline) {
+               kfree(sync_timeline);
+       }
+       return NULL;
+}
+
+void mali_internal_sync_timeline_destroy(struct mali_internal_sync_timeline *sync_timeline)
+{
+       MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+       sync_timeline->destroyed = MALI_TRUE;
+
+       smp_wmb();
+
+       mali_internal_sync_timeline_signal(sync_timeline);
+       kref_put(&sync_timeline->kref_count, mali_internal_sync_timeline_free);
+}
+
+void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync_timeline)
+{
+       unsigned long flags;
+       struct mali_internal_sync_point *sync_pt, *next;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+       spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
+
+       list_for_each_entry_safe(sync_pt, next, &sync_timeline->sync_pt_list_head,
+                                sync_pt_list) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+               if (dma_fence_is_signaled_locked(&sync_pt->base))
+#else
+               if (fence_is_signaled_locked(&sync_pt->base))
+#endif
+                       list_del_init(&sync_pt->sync_pt_list);
+       }
+
+       spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
+}
+
+struct mali_internal_sync_point *mali_internal_sync_point_create(struct mali_internal_sync_timeline *sync_timeline, int size)
+{
+       unsigned long flags;
+       struct mali_internal_sync_point *sync_pt = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+       if (size < sizeof(struct mali_internal_sync_point)) {
+               MALI_PRINT_ERROR(("Mali internal sync:Invalid size to create the mali internal sync point.\n"));
+               goto err;
+       }
+
+       sync_pt = kzalloc(size, GFP_KERNEL);
+       if (NULL == sync_pt) {
+               MALI_PRINT_ERROR(("Mali internal sync:Failed to  allocate buffer  for the mali internal sync point.\n"));
+               goto err;
+       }
+       spin_lock_irqsave(&sync_timeline->sync_pt_list_lock, flags);
+       kref_get(&sync_timeline->kref_count);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       dma_fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
+                      sync_timeline->fence_context, ++sync_timeline->value);
+#else
+       fence_init(&sync_pt->base, &fence_ops, &sync_timeline->sync_pt_list_lock,
+                  sync_timeline->fence_context, ++sync_timeline->value);
+#endif
+       INIT_LIST_HEAD(&sync_pt->sync_pt_list);
+       spin_unlock_irqrestore(&sync_timeline->sync_pt_list_lock, flags);
+
+       return sync_pt;
+err:
+       if (NULL != sync_pt) {
+               kfree(sync_pt);
+       }
+       return NULL;
+}
+
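+/*
+ * Look up a mali_internal_sync_fence from a sync file descriptor. Note that
+ * fget() takes a reference on the underlying struct file, which the caller
+ * is expected to release (fput) when done with the fence.
+ */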
+struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd)
+{
+       struct file *file = fget(fd);
+
+       if (NULL == file) {
+               return NULL;
+       }
+
+       return file->private_data;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
+       struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
+{
+       struct mali_internal_sync_fence *new_sync_fence;
+       int i, j, num_fence1, num_fence2, total_fences;
+       struct fence *fence0 = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+       num_fence1 = sync_fence1->num_fences;
+       num_fence2 = sync_fence2->num_fences;
+
+       total_fences = num_fence1 + num_fence2;
+
+       i = 0;
+       j = 0;
+
+       if (num_fence1 > 0) {
+               fence0 = sync_fence1->cbs[0].fence;
+               i = 1;
+       } else if (num_fence2 > 0) {
+               fence0 = sync_fence2->cbs[0].fence;
+               j = 1;
+       }
+
+       new_sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fence0);
+       if (NULL == new_sync_fence) {
+               MALI_PRINT_ERROR(("Mali internal sync:Failed to  create the mali internal sync fence when merging sync fence.\n"));
+               return NULL;
+       }
+
+       fence_remove_callback(new_sync_fence->cbs[0].fence, &new_sync_fence->cbs[0].cb);
+       new_sync_fence->num_fences = 0;
+       atomic_dec(&new_sync_fence->status);
+
+       for (; i < num_fence1 && j < num_fence2;) {
+               struct fence *fence1 = sync_fence1->cbs[i].fence;
+               struct fence *fence2 = sync_fence2->cbs[j].fence;
+
+               if (fence1->context < fence2->context) {
+                       mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
+
+                       i++;
+               } else if (fence1->context > fence2->context) {
+                       mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
+
+                       j++;
+               } else {
+                       if (fence1->seqno - fence2->seqno <= INT_MAX)
+                               mali_internal_sync_fence_add_fence(new_sync_fence, fence1);
+                       else
+                               mali_internal_sync_fence_add_fence(new_sync_fence, fence2);
+                       i++;
+                       j++;
+               }
+       }
+
+       for (; i < num_fence1; i++)
+               mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence1->cbs[i].fence);
+
+       for (; j < num_fence2; j++)
+               mali_internal_sync_fence_add_fence(new_sync_fence, sync_fence2->cbs[j].fence);
+
+       return new_sync_fence;
+}
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+static struct fence **mali_internal_get_fences(struct mali_internal_sync_fence *sync_fence, int *num_fences)
+#else
+static struct dma_fence **mali_internal_get_fences(struct mali_internal_sync_fence *sync_fence, int *num_fences)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       if (sync_fence->fence->ops == &fence_array_ops) {
+               struct fence_array *fence_array = container_of(sync_fence->fence, struct fence_array, base);
+               *num_fences = fence_array->num_fences;
+               return fence_array->fences;
+       }
+#else
+       if (sync_fence->fence->ops == &dma_fence_array_ops) {
+               struct dma_fence_array *fence_array = container_of(sync_fence->fence, struct dma_fence_array, base);
+               *num_fences = fence_array->num_fences;
+               return fence_array->fences;
+       }
+#endif
+       *num_fences = 1;
+       return &sync_fence->fence;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+static void mali_internal_add_fence_array(struct fence **fences, int *num_fences, struct fence *fence)
+#else
+static void mali_internal_add_fence_array(struct dma_fence **fences, int *num_fences, struct dma_fence *fence)
+#endif
+{
+       fences[*num_fences] = fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       if (!fence_is_signaled(fence)) {
+               fence_get(fence);
+               (*num_fences)++;
+       }
+#else
+       if (!dma_fence_is_signaled(fence)) {
+               dma_fence_get(fence);
+               (*num_fences)++;
+       }
+#endif
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
+                              struct fence **fences, int num_fences)
+#else
+static int mali_internal_sync_fence_set_fence_array(struct mali_internal_sync_fence *sync_fence,
+                              struct dma_fence **fences, int num_fences)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       struct fence_array *array;
+#else
+       struct dma_fence_array *array;
+#endif
+       MALI_DEBUG_ASSERT(1 != num_fences);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       array = fence_array_create(num_fences, fences,
+                                          fence_context_alloc(1), 1, false);
+#else
+       array = dma_fence_array_create(num_fences, fences,
+                                          dma_fence_context_alloc(1), 1, false);
+#endif
+       if (!array)
+               return -ENOMEM;
+
+       sync_fence->fence = &array->base;
+
+       return 0;
+}
+
+struct mali_internal_sync_fence *mali_internal_sync_fence_merge(
+       struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
+{
+       struct mali_internal_sync_fence *sync_fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       struct fence **fences, **nfences, **fences1, **fences2;
+#else
+       struct dma_fence **fences, **nfences, **fences1, **fences2;
+#endif
+       int real_num_fences, i, j, num_fences, num_fences1, num_fences2;
+
+       fences1 = mali_internal_get_fences(sync_fence1, &num_fences1);
+       fences2 = mali_internal_get_fences(sync_fence2, &num_fences2);
+
+       num_fences = num_fences1 + num_fences2;
+
+       fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+       if (!fences) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to alloc buffer for fences.\n"));
+               goto fences_alloc_failed;
+       }
+
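+       /*
+        * Merge both fence lists, which are kept ordered by fence context:
+        * fences from the same context are deduplicated, preferring the later
+        * point (the seqno subtraction is wraparound-safe).
+        */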
+       for (real_num_fences = i = j = 0; i < num_fences1 && j < num_fences2;) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+               struct fence *fence1 = fences1[i];
+               struct fence *fence2 = fences2[j];
+#else
+               struct dma_fence *fence1 = fences1[i];
+               struct dma_fence *fence2 = fences2[j];
+#endif
+               if (fence1->context < fence2->context) {
+                       mali_internal_add_fence_array(fences, &real_num_fences, fence1);
+
+                       i++;
+               } else if (fence1->context > fence2->context) {
+                       mali_internal_add_fence_array(fences, &real_num_fences, fence2);
+
+                       j++;
+               } else {
+                       if (fence1->seqno - fence2->seqno <= INT_MAX)
+                               mali_internal_add_fence_array(fences, &real_num_fences, fence1);
+                       else
+                               mali_internal_add_fence_array(fences, &real_num_fences, fence2);
+
+                       i++;
+                       j++;
+               }
+       }
+
+       for (; i < num_fences1; i++)
+               mali_internal_add_fence_array(fences, &real_num_fences, fences1[i]);
+
+       for (; j < num_fences2; j++)
+               mali_internal_add_fence_array(fences, &real_num_fences, fences2[j]);
+
+       if (0 == real_num_fences)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+               fences[real_num_fences++] = fence_get(fences1[0]);
+#else
+               fences[real_num_fences++] = dma_fence_get(fences1[0]);
+#endif
+
+       if (num_fences > real_num_fences) {
+               nfences = krealloc(fences, real_num_fences * sizeof(*fences),
+                                  GFP_KERNEL);
+               /*
+                * If the shrinking krealloc fails, the old array and the
+                * fence references it holds are still live, so release them
+                * through the common error path instead of leaking them.
+                */
+               if (!nfences)
+                       goto sync_fence_alloc_failed;
+
+               fences = nfences;
+       }
+
+       sync_fence = (struct mali_internal_sync_fence *)sync_file_create(fences[0]);
+       if (NULL == sync_fence) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to create the mali internal sync fence when merging sync fences.\n"));
+               goto sync_fence_alloc_failed;
+       }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       fence_put(fences[0]);
+#else
+       dma_fence_put(fences[0]);
+#endif
+
+       if (mali_internal_sync_fence_set_fence_array(sync_fence, fences, real_num_fences) < 0) {
+               MALI_PRINT_ERROR(("Mali internal sync: Failed to set fence for sync fence.\n"));
+               goto sync_fence_set_failed;
+       }
+
+       return sync_fence;
+
+sync_fence_set_failed:
+       fput(sync_fence->file);
+sync_fence_alloc_failed:
+       for (i = 0; i < real_num_fences; i++)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+               fence_put(fences[i]);
+#else
+               dma_fence_put(fences[i]);
+#endif
+       kfree(fences);
+fences_alloc_failed:
+       return NULL;
+}
+#endif
+
+void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter *waiter,
+               mali_internal_sync_callback_t callback)
+{
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+       MALI_DEBUG_ASSERT_POINTER(callback);
+
+       INIT_LIST_HEAD(&waiter->work.task_list);
+       waiter->callback = callback;
+}
+
+int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fence,
+                                       struct mali_internal_sync_fence_waiter *waiter)
+{
+       int err;
+       unsigned long flags;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence);
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+       err = atomic_read(&sync_fence->status);
+
+       if (0 > err)
+               return err;
+
+       if (!err)
+               return 1;
+
+       init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
+       waiter->work.private = sync_fence;
+
+       spin_lock_irqsave(&sync_fence->wq.lock, flags);
+       err = atomic_read(&sync_fence->status);
+
+       if (0 < err)
+               __add_wait_queue_tail(&sync_fence->wq, &waiter->work);
+       spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
+
+       if (0 > err)
+               return err;
+
+       return !err;
+#else
+       if ((sync_fence->fence) && (sync_fence->fence->ops) && (sync_fence->fence->ops->signaled))
+               err = sync_fence->fence->ops->signaled(sync_fence->fence);
+       else
+               err = -1;
+
+       if (0 > err) {
+#ifdef DEBUG
+               trace_printk("Mali, line%d, signal error\n", __LINE__);
+#endif
+               return err;
+       }
+
+       if (1 == err)
+               return err;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       err = dma_fence_add_callback(sync_fence->fence, &waiter->cb, mali_internal_fence_check_cb_func);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+       err = fence_add_callback(sync_fence->fence, &waiter->cb, mali_internal_fence_check_cb_func);
+#endif
+
+       if (0 != err) {
+#ifdef DEBUG
+               trace_printk("Mali, fence_add_callback error %d\n", err);
+#endif
+               if (-ENOENT == err)
+                       err = 1;
+               return err;
+       }
+
+       init_waitqueue_func_entry(&waiter->work, mali_internal_sync_fence_wake_up_wq);
+       waiter->work.private = sync_fence;
+
+       spin_lock_irqsave(&sync_fence->wq.lock, flags);
+       err = sync_fence->fence->ops->signaled(sync_fence->fence);
+
+       if (0 == err)
+               __add_wait_queue_tail(&sync_fence->wq, &waiter->work);
+
+       spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
+#ifdef DEBUG
+       if ((1 != err) && (0 != err))
+               trace_printk("Mali, line %d, signal error\n", __LINE__);
+#endif
+
+       return err;
+#endif
+}
+
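+/* Remove a previously queued waiter from a sync fence. Returns -ENOENT if
+ * the waiter is no longer queued, i.e. it already ran or was never added.
+ */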
+int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_fence,
+               struct mali_internal_sync_fence_waiter *waiter)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence);
+       MALI_DEBUG_ASSERT_POINTER(waiter);
+
+       spin_lock_irqsave(&sync_fence->wq.lock, flags);
+       if (!list_empty(&waiter->work.task_list))
+               list_del_init(&waiter->work.task_list);
+       else
+               ret = -ENOENT;
+       spin_unlock_irqrestore(&sync_fence->wq.lock, flags);
+
+       if (0 == ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+               dma_fence_remove_callback(sync_fence->fence, &waiter->cb);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+               fence_remove_callback(sync_fence->fence, &waiter->cb);
+#endif
+       }
+
+       return ret;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const char *mali_internal_fence_get_driver_name(struct dma_fence *fence)
+#else
+static const char *mali_internal_fence_get_driver_name(struct fence *fence)
+#endif
+{
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+       return parent->ops->driver_name;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const char *mali_internal_fence_get_timeline_name(struct dma_fence *fence)
+#else
+static const char *mali_internal_fence_get_timeline_name(struct fence *fence)
+#endif
+{
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+       return parent->name;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_release(struct dma_fence *fence)
+#else
+static void mali_internal_fence_release(struct fence *fence)
+#endif
+{
+       unsigned long flags;
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+       spin_lock_irqsave(fence->lock, flags);
+       /* An empty sync_pt_list is possible here (signaling may never have
+        * been enabled for this point), so unlink quietly without a warning.
+        */
+       if (!list_empty(&sync_pt->sync_pt_list))
+               list_del(&sync_pt->sync_pt_list);
+       spin_unlock_irqrestore(fence->lock, flags);
+
+       if (parent->ops->free_pt)
+               parent->ops->free_pt(sync_pt);
+
+       kref_put(&parent->kref_count, mali_internal_sync_timeline_free);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       dma_fence_free(&sync_pt->base);
+#else
+       fence_free(&sync_pt->base);
+#endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_internal_fence_signaled(struct dma_fence *fence)
+#else
+static bool mali_internal_fence_signaled(struct fence *fence)
+#endif
+{
+       int ret;
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
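+       /* Propagate a negative has_signaled result to the fence so waiters
+        * can observe the error; fence->error replaced fence->status in the
+        * kernel version checked below.
+        */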
+       ret = parent->ops->has_signaled(sync_pt);
+       if (0 > ret)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 68)
+               fence->error = ret;
+#else
+               fence->status = ret;
+#endif
+       return ret;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static bool mali_internal_fence_enable_signaling(struct dma_fence *fence)
+#else
+static bool mali_internal_fence_enable_signaling(struct fence *fence)
+#endif
+{
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+       if (mali_internal_fence_signaled(fence))
+               return false;
+
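+       /* The fence core calls enable_signaling with fence->lock held, which
+        * is assumed here to be the timeline's sync_pt_list_lock, so the list
+        * can be updated without taking it again.
+        */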
+       list_add_tail(&sync_pt->sync_pt_list, &parent->sync_pt_list_head);
+       return true;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static void mali_internal_fence_value_str(struct dma_fence *fence, char *str, int size)
+#else
+static void mali_internal_fence_value_str(struct fence *fence, char *str, int size)
+#endif
+{
+       struct mali_internal_sync_point *sync_pt;
+       struct mali_internal_sync_timeline *parent;
+
+       MALI_DEBUG_ASSERT_POINTER(fence);
+       MALI_IGNORE(str);
+       MALI_IGNORE(size);
+
+       sync_pt = mali_internal_fence_to_sync_pt(fence);
+       parent = mali_internal_sync_pt_to_sync_timeline(sync_pt);
+
+       parent->ops->print_sync_pt(sync_pt);
+}
+
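+/* A single ops table serves both fence APIs: struct fence was renamed to
+ * struct dma_fence in kernel 4.10, so the struct type and the default wait
+ * helper are selected by version checks.
+ */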
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static const struct dma_fence_ops fence_ops = {
+#else
+static const struct fence_ops fence_ops = {
+#endif
+       .get_driver_name = mali_internal_fence_get_driver_name,
+       .get_timeline_name = mali_internal_fence_get_timeline_name,
+       .enable_signaling = mali_internal_fence_enable_signaling,
+       .signaled = mali_internal_fence_signaled,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       .wait = dma_fence_default_wait,
+#else
+       .wait = fence_default_wait,
+#endif
+       .release = mali_internal_fence_release,
+       .fence_value_str = mali_internal_fence_value_str,
+};
+#endif
diff --git a/utgard/r8p0/linux/mali_internal_sync.h b/utgard/r8p0/linux/mali_internal_sync.h
new file mode 100755 (executable)
index 0000000..88307bc
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+/**
+ * @file mali_internal_sync.h
+ *
+ * Mali internal structure/interface for sync.
+ */
+
+#ifndef _MALI_INTERNAL_SYNC_H
+#define _MALI_INTERNAL_SYNC_H
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
+#include <sync.h>
+#else
+#include <linux/sync_file.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+#include <linux/dma-fence.h>
+#else
+#include <linux/fence.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+#include <linux/fence-array.h>
+#else
+#include <linux/dma-fence-array.h>
+#endif
+#endif
+
+struct mali_internal_sync_timeline;
+struct mali_internal_sync_point;
+struct mali_internal_sync_fence;
+
+struct mali_internal_sync_timeline_ops {
+       const char *driver_name;
+       int (*has_signaled)(struct mali_internal_sync_point *pt);
+       void (*free_pt)(struct mali_internal_sync_point *sync_pt);
+       void (*release_obj)(struct mali_internal_sync_timeline *sync_timeline);
+       void (*print_sync_pt)(struct mali_internal_sync_point *sync_pt);
+};
+
+struct mali_internal_sync_timeline {
+       struct kref             kref_count;
+       const struct mali_internal_sync_timeline_ops  *ops;
+       char                    name[32];
+       bool                    destroyed;
+       int                     fence_context;
+       int                     value;
+       spinlock_t              sync_pt_list_lock;
+       struct list_head        sync_pt_list_head;
+};
+
+struct mali_internal_sync_point {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       struct dma_fence base;
+#else
+       struct fence base;
+#endif
+       struct list_head        sync_pt_list;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+struct mali_internal_sync_fence_cb {
+       struct fence_cb cb;
+       struct fence *fence;
+       struct mali_internal_sync_fence *sync_file;
+};
+#endif
+
+#define mali_internal_sync_fence sync_file
+
+struct mali_internal_sync_fence_waiter;
+
+typedef void (*mali_internal_sync_callback_t)(struct mali_internal_sync_fence *sync_fence,
+               struct mali_internal_sync_fence_waiter *waiter);
+
+struct mali_internal_sync_fence_waiter {
+       wait_queue_t work;
+       mali_internal_sync_callback_t callback;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       struct fence_cb cb;
+#else
+       struct dma_fence_cb cb;
+#endif
+#endif
+};
+
+/**
+ * Create a mali internal sync timeline.
+ * @param ops The implementation ops for the mali internal sync timeline
+ * @param size The size to allocate
+ * @param name The sync_timeline name
+ * @return The new mali internal sync timeline if successful, NULL if not.
+ */
+struct mali_internal_sync_timeline *mali_internal_sync_timeline_create(const struct mali_internal_sync_timeline_ops *ops,
+               int size, const char *name);
+
+/**
+ * Destroy one mali internal sync timeline.
+ * @param sync_timeline The mali internal sync timeline to destroy.
+ */
+void mali_internal_sync_timeline_destroy(struct mali_internal_sync_timeline *sync_timeline);
+
+/**
+ * Signal one mali internal sync timeline.
+ * @param sync_timeline The mali internal sync timeline to signal.
+ */
+void mali_internal_sync_timeline_signal(struct mali_internal_sync_timeline *sync_timeline);
+
+/**
+ * Create one mali internal sync point.
+ * @param sync_timeline The mali internal sync timeline that the new sync point is added to.
+ * @param size The size to allocate for the sync point.
+ * @return The new mali internal sync point if successful, NULL if not.
+ */
+struct mali_internal_sync_point *mali_internal_sync_point_create(struct mali_internal_sync_timeline *sync_timeline, int size);
+
+/**
+ * Merge mali internal sync fences
+ * @param sync_fence1 The first mali internal sync fence to merge
+ * @param sync_fence2 The second mali internal sync fence to merge
+ * @return the new mali internal sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_internal_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1,
+               struct mali_internal_sync_fence *sync_fence2);
+
+/**
+ * Get the mali internal sync fence from sync fd
+ * @param fd The sync file descriptor to get the mali internal sync fence from
+ * @return the mali internal sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_internal_sync_fence_fdget(int fd);
+
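+/**
+ * Initialize a mali internal sync fence waiter.
+ * @param waiter The mali internal sync fence waiter to initialize.
+ * @param callback The callback to run when the waited-on sync fence signals.
+ */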
+void mali_internal_sync_fence_waiter_init(struct mali_internal_sync_fence_waiter *waiter,
+               mali_internal_sync_callback_t callback);
+
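+/**
+ * Wait asynchronously on a mali internal sync fence.
+ * @param sync_fence The mali internal sync fence to wait on.
+ * @param waiter The waiter to queue on the sync fence.
+ * @return 1 if the sync fence was already signaled, 0 if the waiter was
+ * queued, or a negative error code.
+ */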
+int mali_internal_sync_fence_wait_async(struct mali_internal_sync_fence *sync_fence,
+                                       struct mali_internal_sync_fence_waiter *waiter);
+
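+/**
+ * Cancel an asynchronous wait on a mali internal sync fence.
+ * @param sync_fence The mali internal sync fence the waiter was queued on.
+ * @param waiter The waiter to remove.
+ * @return 0 if the waiter was removed before running, -ENOENT otherwise.
+ */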
+int mali_internal_sync_fence_cancel_async(struct mali_internal_sync_fence *sync_fence,
+               struct mali_internal_sync_fence_waiter *waiter);
+
+#endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)*/
+#endif /* _MALI_INTERNAL_SYNC_H */
diff --git a/utgard/r8p0/linux/mali_kernel_linux.c b/utgard/r8p0/linux/mali_kernel_linux.c
new file mode 100644 (file)
index 0000000..4a0b9e8
--- /dev/null
@@ -0,0 +1,1149 @@
+/**
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h>   /* kernel module definitions */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/cdev.h>     /* character device definitions */
+#include <linux/mm.h>       /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_swap_alloc.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+
+static int is_first_resume = 1;
+/*Store the clk and vol for boot/insmod and mali_resume*/
+/* Store the clk and vol for boot/insmod and mali_resume */
+#endif
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters);
+#endif /* CONFIG_TRACEPOINTS */
+
+#ifdef CONFIG_MALI_DEVFREQ
+#include "mali_devfreq.h"
+#include "mali_osk_mali.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+#include <linux/pm_opp.h>
+#else
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else
+#include <linux/opp.h>
+#endif /* Linux >= 3.13*/
+#define dev_pm_opp_of_add_table of_init_opp_table
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define dev_pm_opp_of_remove_table of_free_opp_table
+#endif /* Linux >= 3.19 */
+#endif /* Linux >= 4.4.0 */
+#endif
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+extern void mali_post_init(void);
+extern int mali_pdev_dts_init(struct platform_device *mali_gpu_device);
+extern int mpgpu_class_init(void);
+extern void mpgpu_class_exit(void);
+
+int mali_page_fault = 0;
+module_param(mali_page_fault, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_page_fault, "mali_page_fault");
+
+int pp_hardware_reset = 0;
+module_param(pp_hardware_reset, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(pp_hardware_reset, "mali_hardware_reset");
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+extern unsigned int mali_mem_swap_out_threshold_value;
+module_param(mali_mem_swap_out_threshold_value, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_mem_swap_out_threshold_value, "Threshold value used to limit how much swappable memory is cached in the Mali driver.");
+
+#if defined(CONFIG_MALI_DVFS)
+/** The max FPS, the same as the display vsync rate (default 60); can be set as a module parameter. */
+extern int mali_max_system_fps;
+module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_system_fps, "Max system fps the same as display VSYNC.");
+
+/** A lower limit on the desired FPS (default 58); can be set as a module parameter. */
+extern int mali_desired_fps;
+module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_desired_fps, "The user desired fps, a bit lower than max_system_fps");
+#endif
+
+#if MALI_ENABLE_CPU_CYCLES
+#include <linux/cpumask.h>
+#include <linux/timer.h>
+#include <asm/smp.h>
+static struct timer_list mali_init_cpu_clock_timers[8];
+static u32 mali_cpu_clock_last_value[8] = {0,};
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+struct pm_ext_ops mali_dev_ext_pm_ops = {
+       .base =
+       {
+               .suspend = mali_driver_suspend_scheduler,
+               .resume = mali_driver_resume_scheduler,
+               .freeze = mali_driver_suspend_scheduler,
+               .thaw =   mali_driver_resume_scheduler,
+       },
+};
+#else
+static const struct dev_pm_ops mali_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_driver_runtime_suspend,
+       .runtime_resume = mali_driver_runtime_resume,
+       .runtime_idle = mali_driver_runtime_idle,
+#endif
+       .suspend = mali_driver_suspend_scheduler,
+       .resume = mali_driver_resume_scheduler,
+       .freeze = mali_driver_suspend_scheduler,
+       .thaw = mali_driver_resume_scheduler,
+       .poweroff = mali_driver_suspend_scheduler,
+};
+#endif
+
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+       {.compatible = "arm,mali-300"},
+       {.compatible = "arm,mali-400"},
+       {.compatible = "arm,mali-450"},
+       {.compatible = "arm,mali-470"},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver = {
+       .probe  = mali_probe,
+       .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+       .pm = &mali_dev_ext_pm_ops,
+#endif
+       .driver =
+       {
+               .name   = MALI_GPU_NAME_UTGARD,
+               .owner  = THIS_MODULE,
+               .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+               .pm = &mali_dev_pm_ops,
+#endif
+#ifdef CONFIG_MALI_DT
+               .of_match_table = of_match_ptr(base_dt_ids),
+#endif
+       },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops = {
+       .owner = THIS_MODULE,
+       .open = mali_open,
+       .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+       .unlocked_ioctl = mali_ioctl,
+#else
+       .ioctl = mali_ioctl,
+#endif
+       .compat_ioctl = mali_ioctl,
+       .mmap = mali_mmap
+};
+
+#if MALI_ENABLE_CPU_CYCLES
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
+{
+       /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */
+       u32 write_value;
+
+       /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */
+       /* Setting p15 c9 c12 1 to 0x8000000f == CPU_CYCLE_ENABLE | EVENT_3_ENABLE | EVENT_2_ENABLE | EVENT_1_ENABLE | EVENT_0_ENABLE */
+       asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f));
+
+
+       /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
+       write_value = 1 << 0; /* Bit 0 set. Enable counters */
+       if (reset) {
+               write_value |= 1 << 1; /* Reset event counters */
+               write_value |= 1 << 2; /* Reset cycle counter  */
+       }
+       if (enable_divide_by_64) {
+               write_value |= 1 << 3; /* Enable the Clock divider by 64 */
+       }
+       write_value |= 1 << 4; /* Export enable. Not needed */
+       asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value));
+
+       /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
+       asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+
+
+       /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1" */
+       /* User mode access to the Performance Monitors enabled. */
+       /* Lets User space read cpu clock cycles */
+       asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
+}
+
+/** A timer function that configures the cycle clock counter on current CPU.
+ * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this
+ * function to trigger on all CPUs during module load.
+ */
+static void mali_init_cpu_clock_timer_func(unsigned long data)
+{
+       int reset_counters, enable_divide_clock_counter_by_64;
+       int current_cpu = raw_smp_processor_id();
+       unsigned int sample0;
+       unsigned int sample1;
+
+       MALI_IGNORE(data);
+
+       reset_counters = 1;
+       enable_divide_clock_counter_by_64 = 0;
+       mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
+
+       sample0 = mali_get_cpu_cyclecount();
+       sample1 = mali_get_cpu_cyclecount();
+
+       MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1));
+}
+
+/** A timer function for storing the current time on all CPUs.
+ * Used for checking if the clocks have similar values or if they are drifting.
+ */
+static void mali_print_cpu_clock_timer_func(unsigned long data)
+{
+       int current_cpu = raw_smp_processor_id();
+       unsigned int sample0;
+
+       MALI_IGNORE(data);
+       sample0 = mali_get_cpu_cyclecount();
+       if (current_cpu < 8) {
+               mali_cpu_clock_last_value[current_cpu] = sample0;
+       }
+}
+
+/** Init the performance registers on all CPUs to count clock cycles.
+ * For init, \a print_only should be 0.
+ * If \a print_only is 1, it will instead print the current clock value of all CPUs.
+ */
+void mali_init_cpu_time_counters_on_all_cpus(int print_only)
+{
+       int i = 0;
+       int cpu_number;
+       int jiffies_trigger;
+       int jiffies_wait;
+
+       jiffies_wait = _mali_osk_time_mstoticks(20);
+       jiffies_trigger = jiffies + jiffies_wait;
+
+       for (i = 0 ; i < 8 ; i++) {
+               init_timer(&mali_init_cpu_clock_timers[i]);
+               if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
+               else            mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
+               mali_init_cpu_clock_timers[i].expires = jiffies_trigger ;
+       }
+       cpu_number = cpumask_first(cpu_online_mask);
+       for (i = 0 ; i < 8 ; i++) {
+               int next_cpu;
+               add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
+               next_cpu = cpumask_next(cpu_number, cpu_online_mask);
+               if (next_cpu >= nr_cpu_ids) break;
+               cpu_number = next_cpu;
+       }
+
+       while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait);
+
+       for (i = 0 ; i < 8 ; i++) {
+               del_timer_sync(&mali_init_cpu_clock_timers[i]);
+       }
+
+       if (print_only) {
+               if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) {
+                       /* Diff can be printed if we want to check if the clocks are in sync
+                       int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
+                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
+               } else {
+                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3]));
+               }
+       }
+}
+#endif
+
+int mali_module_init(void)
+{
+       int err = 0;
+
+       MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
+       //MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+       MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+#if MALI_ENABLE_CPU_CYCLES
+       mali_init_cpu_time_counters_on_all_cpus(0);
+       MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n"));
+       /* Printing the current cpu counters */
+       mali_init_cpu_time_counters_on_all_cpus(1);
+#endif
+
+       /* Initialize module wide settings */
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+       MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+       err = mali_platform_device_register();
+       if (0 != err) {
+               return err;
+       }
+#endif
+#endif
+
+       MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+       err = platform_driver_register(&mali_platform_driver);
+
+       if (0 != err) {
+               MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+               mali_platform_device_unregister();
+#endif
+#endif
+               mali_platform_device = NULL;
+               return err;
+       }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+       err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+       if (0 != err) {
+               /* No big deal if we weren't able to initialize the profiling */
+               MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+       }
+#endif
+
+       /* Tracing the current frequency and voltage from boot/insmod*/
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item(), to record current clk info. */
+       mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[0].clock,
+                                     mali_gpu_clk[0].vol / 1000,
+                                     0, 0, 0);
+#endif
+
+       MALI_PRINT(("Mali device driver loaded\n"));
+
+       mpgpu_class_init();
+
+       return 0; /* Success */
+}
+
+void mali_module_exit(void)
+{
+       MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
+
+       MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+       platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
+       MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+       mali_platform_device_unregister();
+#endif
+#endif
+
+       /* Tracing the current frequency and voltage from rmmod*/
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     0,
+                                     0,
+                                     0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+       _mali_internal_profiling_term();
+#endif
+       mpgpu_class_exit();
+
+       MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device *mali_device_alloc(void)
+{
+       return kzalloc(sizeof(struct mali_device), GFP_KERNEL);
+}
+
+void mali_device_free(struct mali_device *mdev)
+{
+       kfree(mdev);
+}
+#endif
+
+static int mali_probe(struct platform_device *pdev)
+{
+       int err;
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev;
+#endif
+
+       MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+       if (NULL != mali_platform_device) {
+               /* Already connected to a device, return error */
+               MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+               return -EEXIST;
+       }
+
+       mali_platform_device = pdev;
+
+#ifdef CONFIG_MALI_DT
+       /* If we use DT to initialize our DDK, we have to prepare somethings. */
+       err = mali_platform_device_init(mali_platform_device);
+       if (0 != err) {
+               MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+               mali_platform_device = NULL;
+               return -EFAULT;
+       }
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+       mdev = mali_device_alloc();
+       if (!mdev) {
+               MALI_PRINT_ERROR(("Can't allocate mali device private data\n"));
+               return -ENOMEM;
+       }
+
+       mdev->dev = &pdev->dev;
+       dev_set_drvdata(mdev->dev, mdev);
+
+       /* Initialize clock and regulator */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_REGULATOR)
+       mdev->regulator = regulator_get_optional(mdev->dev, "mali");
+       if (IS_ERR_OR_NULL(mdev->regulator)) {
+               MALI_DEBUG_PRINT(2, ("Continuing without Mali regulator control\n"));
+               mdev->regulator = NULL;
+               /* Allow probe to continue without regulator */
+       }
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_PM_OPP)
+       /* Register the OPPs if they are available in device tree */
+       if (dev_pm_opp_of_add_table(mdev->dev) < 0)
+               MALI_DEBUG_PRINT(3, ("OPP table not found\n"));
+#endif
+
+       /* Need to name the gpu clock "clk_mali" in the device tree */
+       mdev->clock = clk_get(mdev->dev, "clk_mali");
+       if (IS_ERR_OR_NULL(mdev->clock)) {
+               MALI_DEBUG_PRINT(2, ("Continuing without Mali clock control\n"));
+               mdev->clock = NULL;
+               /* Allow probe to continue without clock. */
+       } else {
+               err = clk_prepare_enable(mdev->clock);
+               if (err) {
+                       MALI_PRINT_ERROR(("Failed to prepare and enable clock (%d)\n", err));
+                       goto clock_prepare_failed;
+               }
+       }
+
+       /* Initialize PM metrics */
+       if (mali_pm_metrics_init(mdev) < 0) {
+               MALI_DEBUG_PRINT(2, ("mali pm metrics init failed\n"));
+               goto pm_metrics_init_failed;
+       }
+
+       if (mali_devfreq_init(mdev) < 0) {
+               MALI_DEBUG_PRINT(2, ("mali devfreq init failed\n"));
+               goto devfreq_init_failed;
+       }
+#endif
+
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+               /* Initialize the Mali GPU HW specified by pdev */
+               if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+                       /* Register a misc device (so we are accessible from user space) */
+                       err = mali_miscdevice_register(pdev);
+                       if (0 == err) {
+                               /* Setup sysfs entries */
+                               err = mali_sysfs_register(mali_dev_name);
+
+                               if (0 == err) {
+                                       mali_post_init();
+                                       MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
+                                       return 0;
+                               } else {
+                                       MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+                               }
+                               mali_miscdevice_unregister();
+                       } else {
+                               MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+                       }
+                       mali_terminate_subsystems();
+               } else {
+                       MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+               }
+               _mali_osk_wq_term();
+       }
+
+#ifdef CONFIG_MALI_DEVFREQ
+       mali_devfreq_term(mdev);
+devfreq_init_failed:
+       mali_pm_metrics_term(mdev);
+pm_metrics_init_failed:
+       clk_disable_unprepare(mdev->clock);
+clock_prepare_failed:
+       clk_put(mdev->clock);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_PM_OPP)
+       dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_REGULATOR)
+       regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+       mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+       mali_platform_device_deinit(mali_platform_device);
+#endif
+       mali_platform_device = NULL;
+       return -EFAULT;
+}
+
+static int mali_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev = dev_get_drvdata(&pdev->dev);
+#endif
+
+       MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+       mali_sysfs_unregister();
+       mali_miscdevice_unregister();
+       mali_terminate_subsystems();
+       _mali_osk_wq_term();
+
+#ifdef CONFIG_MALI_DEVFREQ
+       mali_devfreq_term(mdev);
+
+       mali_pm_metrics_term(mdev);
+
+       if (mdev->clock) {
+               clk_disable_unprepare(mdev->clock);
+               clk_put(mdev->clock);
+               mdev->clock = NULL;
+       }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_PM_OPP)
+       dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+                        && defined(CONFIG_REGULATOR)
+       regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+       mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+       mali_platform_device_deinit(mali_platform_device);
+#endif
+       mali_platform_device = NULL;
+       return 0;
+}
+
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+       int err;
+
+       mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+       mali_miscdevice.name = mali_dev_name;
+       mali_miscdevice.fops = &mali_fops;
+       mali_miscdevice.parent = get_device(&pdev->dev);
+
+       err = misc_register(&mali_miscdevice);
+       if (0 != err) {
+               MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+       }
+
+       return err;
+}
+
+static void mali_miscdevice_unregister(void)
+{
+       misc_deregister(&mali_miscdevice);
+}
+
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev = dev_get_drvdata(dev);
+       if (!mdev)
+               return -ENODEV;
+#endif
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+                (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_suspend_device(mdev->devfreq);
+#endif
+
+       mali_pm_os_suspend(MALI_TRUE);
+       /* Tracing the frequency and voltage after mali is suspended */
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     0,
+                                     0,
+                                     0, 0, 0);
+       return 0;
+}
+
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev = dev_get_drvdata(dev);
+       if (!mdev)
+               return -ENODEV;
+#endif
+
+       /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item() once, to record current clk info. */
+       if (is_first_resume == 1) {
+               mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+               is_first_resume = 0;
+       }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[1].clock,
+                                     mali_gpu_clk[1].vol / 1000,
+                                     0, 0, 0);
+#endif
+       mali_pm_os_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+                (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_resume_device(mdev->devfreq);
+#endif
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev = dev_get_drvdata(dev);
+       if (!mdev)
+               return -ENODEV;
+#endif
+
+       if (MALI_TRUE == mali_pm_runtime_suspend()) {
+               /* Tracing the frequency and voltage after mali is suspended */
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                             0,
+                                             0,
+                                             0, 0, 0);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+                (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+               MALI_DEBUG_PRINT(4, ("devfreq_suspend_device: stop devfreq monitor\n"));
+               devfreq_suspend_device(mdev->devfreq);
+#endif
+
+               return 0;
+       } else {
+               return -EBUSY;
+       }
+}
+
+static int mali_driver_runtime_resume(struct device *dev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+       struct mali_device *mdev = dev_get_drvdata(dev);
+       if (!mdev)
+               return -ENODEV;
+#endif
+
+       /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item() once, to record current clk info. */
+       if (is_first_resume == 1) {
+               mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+               is_first_resume = 0;
+       }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[1].clock,
+                                     mali_gpu_clk[1].vol / 1000,
+                                     0, 0, 0);
+#endif
+
+       mali_pm_runtime_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+                (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       MALI_DEBUG_PRINT(4, ("devfreq_resume_device: start devfreq monitor\n"));
+       devfreq_resume_device(mdev->devfreq);
+#endif
+       return 0;
+}
+
+static int mali_driver_runtime_idle(struct device *dev)
+{
+       /* Nothing to do */
+       return 0;
+}
+#endif
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+       struct mali_session_data *session_data;
+       _mali_osk_errcode_t err;
+
+       /* input validation */
+       if (mali_miscdevice.minor != iminor(inode)) {
+               MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+               return -ENODEV;
+       }
+
+       /* allocated struct to track this session */
+       err = _mali_ukk_open((void **)&session_data);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* initialize file pointer */
+       filp->f_pos = 0;
+
+       /* link in our session data */
+       filp->private_data = (void *)session_data;
+
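+       /* Share the global swap file's address space with this fd so that
+        * swappable Mali memory mapped through it is backed by the swap file.
+        */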
+       filp->f_mapping = mali_mem_swap_get_global_swap_file()->f_mapping;
+
+       return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+       _mali_osk_errcode_t err;
+
+       /* input validation */
+       if (mali_miscdevice.minor != iminor(inode)) {
+               MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+               return -ENODEV;
+       }
+
+       err = _mali_ukk_close((void **)&filp->private_data);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+int map_errcode(_mali_osk_errcode_t err)
+{
+       switch (err) {
+       case _MALI_OSK_ERR_OK :
+               return 0;
+       case _MALI_OSK_ERR_FAULT:
+               return -EFAULT;
+       case _MALI_OSK_ERR_INVALID_FUNC:
+               return -ENOTTY;
+       case _MALI_OSK_ERR_INVALID_ARGS:
+               return -EINVAL;
+       case _MALI_OSK_ERR_NOMEM:
+               return -ENOMEM;
+       case _MALI_OSK_ERR_TIMEOUT:
+               return -ETIMEDOUT;
+       case _MALI_OSK_ERR_RESTARTSYSCALL:
+               return -ERESTARTSYS;
+       case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+               return -ENOENT;
+       default:
+               return -EFAULT;
+       }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+       int err;
+       struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+       /* inode not used */
+       (void)inode;
+#endif
+
+       MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+       session_data = (struct mali_session_data *)filp->private_data;
+       if (NULL == session_data) {
+               MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+               return -ENOTTY;
+       }
+
+       if (NULL == (void *)arg) {
+               MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+               return -ENOTTY;
+       }
+
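+       /* Each ioctl argument struct is checked for 64-bit alignment so its
+        * layout matches between 32-bit user space and a 64-bit kernel.
+        */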
+       switch (cmd) {
+       case MALI_IOC_WAIT_FOR_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
+               err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+               break;
+
+       case MALI_IOC_GET_API_VERSION_V2:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+               err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+               break;
+
+       case MALI_IOC_GET_API_VERSION:
+               err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+               break;
+
+       case MALI_IOC_POST_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
+               err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+               break;
+
+       case MALI_IOC_GET_USER_SETTINGS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
+               err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+               break;
+
+       case MALI_IOC_REQUEST_HIGH_PRIORITY:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
+               err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
+               break;
+
+       case MALI_IOC_PENDING_SUBMIT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pending_submit_s), sizeof(u64)));
+               err = pending_submit_wrapper(session_data, (_mali_uk_pending_submit_s __user *)arg);
+               break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+       case MALI_IOC_PROFILING_ADD_EVENT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
+               err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+               break;
+
+       case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
+               err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+               break;
+
+       case MALI_IOC_PROFILING_STREAM_FD_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64)));
+               err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg);
+               break;
+
+       case MALI_IOC_PROILING_CONTROL_SET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64)));
+               err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg);
+               break;
+#else
+
+       case MALI_IOC_PROFILING_ADD_EVENT:          /* FALL-THROUGH */
+       case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
+               MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+               err = -ENOTTY;
+               break;
+#endif
+
+       case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
+               err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_ALLOC:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64)));
+               err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_FREE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_free_mem_s), sizeof(u64)));
+               err = mem_free_wrapper(session_data, (_mali_uk_free_mem_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_BIND:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_bind_mem_s), sizeof(u64)));
+               err = mem_bind_wrapper(session_data, (_mali_uk_bind_mem_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_UNBIND:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unbind_mem_s), sizeof(u64)));
+               err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_COW:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64)));
+               err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_COW_MODIFY_RANGE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64)));
+               err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_RESIZE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_resize_s), sizeof(u64)));
+               err = mem_resize_mem_wrapper(session_data, (_mali_uk_mem_resize_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_WRITE_SAFE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
+               err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
+               err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
+               err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+               break;
+
+       case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+#ifdef CONFIG_DMA_SHARED_BUFFER
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
+               err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+#else
+               MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+               err = -ENOTTY;
+#endif
+               break;
+
+       case MALI_IOC_PP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
+               err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+               break;
+
+       case MALI_IOC_PP_AND_GP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
+               err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
+               break;
+
+       case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
+               err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+               break;
+
+       case MALI_IOC_PP_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
+               err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+               break;
+
+       case MALI_IOC_PP_DISABLE_WB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
+               err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+               break;
+
+       case MALI_IOC_GP2_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
+               err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+               break;
+
+       case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
+               err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+               break;
+
+       case MALI_IOC_GP2_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
+               err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+               break;
+
+       case MALI_IOC_GP2_SUSPEND_RESPONSE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
+               err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+               break;
+
+       case MALI_IOC_VSYNC_EVENT_REPORT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
+               err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+               break;
+
+       case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
+               err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
+               break;
+       case MALI_IOC_TIMELINE_WAIT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
+               err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
+               break;
+       case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
+               err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
+               break;
+       case MALI_IOC_SOFT_JOB_START:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
+               err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
+               break;
+       case MALI_IOC_SOFT_JOB_SIGNAL:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
+               err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
+               break;
+
+       default:
+               MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+               err = -ENOTTY;
+       }
+
+       return err;
+}
+
+
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/utgard/r8p0/linux/mali_kernel_linux.h b/utgard/r8p0/linux/mali_kernel_linux.h
new file mode 100755 (executable)
index 0000000..9b4307e
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h>     /* character device definitions */
+#include <linux/idr.h>
+#include <linux/rbtree.h>
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+#include <linux/version.h>
+
+extern struct platform_device *mali_platform_device;
+
+/* Kernels after 3.19.0 dropped the CONFIG_PM_RUNTIME define, so define it ourselves. */
+#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define CONFIG_PM_RUNTIME 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/utgard/r8p0/linux/mali_kernel_sysfs.c b/utgard/r8p0/linux/mali_kernel_sysfs.c
new file mode 100755 (executable)
index 0000000..bf0cc88
--- /dev/null
@@ -0,0 +1,1415 @@
+/**
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | (src))
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | ((sub_job) << 8) | (src))
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) ((a) & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF)
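+
+/*
+ * Illustrative round trip of the bitfield above (the values are made up
+ * for the example): bit 24 flags a PP counter, bit 16 flags a sub-job
+ * override, bits 8..15 carry the sub-job index and bits 0..7 the source.
+ *
+ *   uintptr_t key = PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, 2);
+ *   PRIVATE_DATA_COUNTER_IS_PP(key);        // MALI_TRUE
+ *   PRIVATE_DATA_COUNTER_IS_SUB_JOB(key);   // MALI_TRUE
+ *   PRIVATE_DATA_COUNTER_GET_SUB_JOB(key);  // 2
+ *   PRIVATE_DATA_COUNTER_GET_SRC(key);      // 1
+ */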
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum {
+       _MALI_DEVICE_SUSPEND,
+       _MALI_DEVICE_RESUME,
+       _MALI_DEVICE_DVFS_PAUSE,
+       _MALI_DEVICE_DVFS_RESUME,
+       _MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+static const char *const mali_power_events[_MALI_MAX_EVENTS] = {
+       [_MALI_DEVICE_SUSPEND] = "suspend",
+       [_MALI_DEVICE_RESUME] = "resume",
+       [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+       [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r;
+       char buffer[64];
+       struct mali_group *group;
+
+       group = (struct mali_group *)filp->private_data;
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       r = snprintf(buffer, 64, "%u\n",
+                    mali_executor_group_is_disabled(group) ? 0 : 1);
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int r;
+       char buffer[64];
+       unsigned long val;
+       struct mali_group *group;
+
+       group = (struct mali_group *)filp->private_data;
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       r = kstrtoul(&buffer[0], 10, &val);
+       if (0 != r) {
+               return -EINVAL;
+       }
+
+       switch (val) {
+       case 1:
+               mali_executor_group_enable(group);
+               break;
+       case 0:
+               mali_executor_group_disable(group);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *offp += count;
+       return count;
+}
+
+static const struct file_operations group_enabled_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read = group_enabled_read,
+       .write = group_enabled_write,
+};
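+
+/*
+ * Example use from user space (a sketch; the paths assume debugfs is
+ * mounted at /sys/kernel/debug and the default "mali" directory is
+ * created below in mali_sysfs_register()):
+ *
+ *   echo 0 > /sys/kernel/debug/mali/pp/pp0/enabled   # take the group offline
+ *   cat /sys/kernel/debug/mali/pp/pp0/enabled        # prints 0
+ */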
+
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r;
+       char buffer[64];
+       struct mali_hw_core *hw_core;
+
+       hw_core = (struct mali_hw_core *)filp->private_data;
+       MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+       r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations hw_core_base_addr_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read = hw_core_base_addr_read,
+};
+
+static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+       char buf[64];
+       int r;
+       u32 val;
+
+       if (MALI_TRUE == is_pp) {
+               /* PP counter */
+               if (MALI_TRUE == is_sub_job) {
+                       /* Get counter for a particular sub job */
+                       if (0 == src_id) {
+                               val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job);
+                       } else {
+                               val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job);
+                       }
+               } else {
+                       /* Get default counter for all PP sub jobs */
+                       if (0 == src_id) {
+                               val = mali_pp_job_get_pp_counter_global_src0();
+                       } else {
+                               val = mali_pp_job_get_pp_counter_global_src1();
+                       }
+               }
+       } else {
+               /* GP counter */
+               if (0 == src_id) {
+                       val = mali_gp_job_get_gp_counter_src0();
+               } else {
+                       val = mali_gp_job_get_gp_counter_src1();
+               }
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == val) {
+               r = snprintf(buf, 64, "-1\n");
+       } else {
+               r = snprintf(buf, 64, "%u\n", val);
+       }
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf)) {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0) {
+               return ret;
+       }
+
+       if (val < 0) {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       if (MALI_TRUE == is_pp) {
+               /* PP counter */
+               if (MALI_TRUE == is_sub_job) {
+                       /* Set counter for a particular sub job */
+                       if (0 == src_id) {
+                               mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val);
+                       } else {
+                               mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val);
+                       }
+               } else {
+                       /* Set default counter for all PP sub jobs */
+                       if (0 == src_id) {
+                               mali_pp_job_set_pp_counter_global_src0((u32)val);
+                       } else {
+                               mali_pp_job_set_pp_counter_global_src1((u32)val);
+                       }
+               }
+       } else {
+               /* GP counter */
+               if (0 == src_id) {
+                       mali_gp_job_set_gp_counter_src0((u32)val);
+               } else {
+                       mali_gp_job_set_gp_counter_src1((u32)val);
+               }
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static const struct file_operations profiling_counter_src_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = profiling_counter_src_read,
+       .write = profiling_counter_src_write,
+};
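+
+/*
+ * Sketch of driving these files from a shell (counter IDs are
+ * hardware-specific; 0 below is only a placeholder). As handled above,
+ * any negative value disables the counter:
+ *
+ *   echo 0  > /sys/kernel/debug/mali/profiling/gp/counter_src0   # enable
+ *   echo -1 > /sys/kernel/debug/mali/profiling/gp/counter_src0   # disable
+ */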
+
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+       char buf[64];
+       int r;
+       u32 val;
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+       if (0 == src_id) {
+               val = mali_l2_cache_core_get_counter_src0(l2_core);
+       } else {
+               val = mali_l2_cache_core_get_counter_src1(l2_core);
+       }
+
+       if (MALI_HW_CORE_NO_COUNTER == val) {
+               r = snprintf(buf, 64, "-1\n");
+       } else {
+               r = snprintf(buf, 64, "%u\n", val);
+       }
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf)) {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0) {
+               return ret;
+       }
+
+       if (val < 0) {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+       char buf[64];
+       long val;
+       int ret;
+       u32 l2_id;
+       struct mali_l2_cache_core *l2_cache;
+
+       if (cnt >= sizeof(buf)) {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0) {
+               return ret;
+       }
+
+       if (val < 0) {
+               /* any negative input will disable counter */
+               val = MALI_HW_CORE_NO_COUNTER;
+       }
+
+       l2_id = 0;
+       l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+       while (NULL != l2_cache) {
+               mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
+
+               /* try next L2 */
+               l2_id++;
+               l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src0_read,
+       .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_src1_read,
+       .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+       .owner = THIS_MODULE,
+       .write = l2_all_counter_src1_write,
+};
+
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+       char buf[64];
+       int r;
+       u32 src0 = 0;
+       u32 val0 = 0;
+       u32 src1 = 0;
+       u32 val1 = 0;
+       u32 val = -1;
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+       mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+       if (0 == src_id) {
+               if (MALI_HW_CORE_NO_COUNTER != val0) {
+                       val = val0;
+               }
+       } else {
+               if (MALI_HW_CORE_NO_COUNTER != val1) {
+                       val = val1;
+               }
+       }
+
+       r = snprintf(buf, 64, "%u\n", val);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_val1_read,
+};
+
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+       char buf[32];
+
+       cnt = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       ret = kstrtoul(buf, 10, &val);
+       if (0 != ret) {
+               return ret;
+       }
+
+       /* Update setting (not exactly thread safe) */
+       if (1 == val && MALI_FALSE == power_always_on_enabled) {
+               power_always_on_enabled = MALI_TRUE;
+               _mali_osk_pm_dev_ref_get_sync();
+       } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
+               power_always_on_enabled = MALI_FALSE;
+               _mali_osk_pm_dev_ref_put();
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       if (MALI_TRUE == power_always_on_enabled) {
+               return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+       } else {
+               return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+       }
+}
+
+static const struct file_operations power_always_on_fops = {
+       .owner = THIS_MODULE,
+       .read  = power_always_on_read,
+       .write = power_always_on_write,
+};
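+
+/*
+ * Usage sketch: writing 1 takes a PM runtime reference so the GPU stays
+ * powered; writing 0 releases it again (see the handlers above):
+ *
+ *   echo 1 > /sys/kernel/debug/mali/power/always_on
+ *   cat /sys/kernel/debug/mali/power/always_on   # prints 1
+ */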
+
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[32];
+
+       /* Copy the command into kernel space before parsing it; the user
+        * pointer must not be dereferenced directly. */
+       cnt = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       if (!strncmp(buf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]) - 1)) {
+               mali_pm_os_suspend(MALI_TRUE);
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]) - 1)) {
+               mali_pm_os_resume();
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]) - 1)) {
+               mali_dev_pause();
+       } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]) - 1)) {
+               mali_dev_resume();
+       }
+       *ppos += cnt;
+       return cnt;
+}
+
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+       file->f_pos = offset;
+       return 0;
+}
+
+static const struct file_operations power_power_events_fops = {
+       .owner = THIS_MODULE,
+       .write = power_power_events_write,
+       .llseek = power_power_events_seek,
+};
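+
+/*
+ * The accepted commands are the strings from mali_power_events[] above,
+ * e.g. (illustrative):
+ *
+ *   echo suspend    > /sys/kernel/debug/mali/power/power_events
+ *   echo resume     > /sys/kernel/debug/mali/power/power_events
+ *   echo dvfs_pause > /sys/kernel/debug/mali/power/power_events
+ */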
+
+#if MALI_STATE_TRACKING
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+       u32 len = 0;
+       u32 size;
+       char *buf;
+
+       size = seq_get_buf(seq_file, &buf);
+
+       if (!size) {
+               return -ENOMEM;
+       }
+
+       /* Create the internal state dump. */
+       len  = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+       len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+       len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+       seq_commit(seq_file, len);
+
+       return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+       .owner = THIS_MODULE,
+       .open = mali_seq_internal_state_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       int r;
+
+       r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf)) {
+               return -EINVAL;
+       }
+
+       if (copy_from_user(&buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+
+       buf[cnt] = 0;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret < 0) {
+               return ret;
+       }
+
+       if (val != 0) {
+               u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+               /* check if we are already recording */
+               if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+                       MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+                       return -EFAULT;
+               }
+
+               /* check if we need to clear out an old recording first */
+               if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+                       if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+                               MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+                               return -EFAULT;
+                       }
+               }
+
+               /* start recording profiling data */
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+                       MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+                       return -EFAULT;
+               }
+
+               MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+       } else {
+               /* stop recording profiling data */
+               u32 count = 0;
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+                       MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+                       return -EFAULT;
+               }
+
+               MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+       }
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+       .owner = THIS_MODULE,
+       .read  = profiling_record_read,
+       .write = profiling_record_write,
+};
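+
+/*
+ * Typical capture session (sketch): start recording, run a workload,
+ * stop, then read the buffered events through the seq_file below:
+ *
+ *   echo 1 > /sys/kernel/debug/mali/profiling/record
+ *   ...run a GPU workload...
+ *   echo 0 > /sys/kernel/debug/mali/profiling/record
+ *   cat /sys/kernel/debug/mali/profiling/events_human_readable
+ */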
+
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+       loff_t *spos;
+
+       /* check if we have data available */
+       if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+               return NULL;
+       }
+
+       spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+       if (NULL == spos) {
+               return NULL;
+       }
+
+       *spos = *pos;
+       return spos;
+}
+
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       loff_t *spos = v;
+
+       /* check if we have data available */
+       if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+               return NULL;
+       }
+
+       /* check if the next entry actually is available */
+       if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+               return NULL;
+       }
+
+       *pos = ++*spos;
+       return spos;
+}
+
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+       kfree(v);
+}
+
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+       loff_t *spos = v;
+       u32 index;
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+
+       index = (u32)*spos;
+
+       /* Retrieve all events */
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+               return 0;
+       }
+
+       return 0;
+}
+
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
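+
+       /*
+        * event_id layout, as decoded by the switches below:
+        * bits 24..27 = event type, bits 16..23 = channel (SW/GPx/PPx/GPU),
+        * bits 0..15 = reason/detail.
+        */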
+
+       static u64 start_time = 0;
+       loff_t *spos = v;
+       u32 index;
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+
+       index = (u32)*spos;
+
+       /* Retrieve all events */
+       if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+               seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+               if (0 == index) {
+                       start_time = timestamp;
+               }
+
+               seq_printf(seq_file, "[%06u] ", index);
+
+               switch (event_id & 0x0F000000) {
+               case MALI_PROFILING_EVENT_TYPE_SINGLE:
+                       seq_printf(seq_file, "SINGLE | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_START:
+                       seq_printf(seq_file, "START | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_STOP:
+                       seq_printf(seq_file, "STOP | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+                       seq_printf(seq_file, "SUSPEND | ");
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_RESUME:
+                       seq_printf(seq_file, "RESUME | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+                       break;
+               }
+
+               switch (event_id & 0x00FF0000) {
+               case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+                       seq_printf(seq_file, "SW | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GP0:
+                       seq_printf(seq_file, "GP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP0:
+                       seq_printf(seq_file, "PP0 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP1:
+                       seq_printf(seq_file, "PP1 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP2:
+                       seq_printf(seq_file, "PP2 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP3:
+                       seq_printf(seq_file, "PP3 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP4:
+                       seq_printf(seq_file, "PP4 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP5:
+                       seq_printf(seq_file, "PP5 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP6:
+                       seq_printf(seq_file, "PP6 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_PP7:
+                       seq_printf(seq_file, "PP7 | ");
+                       break;
+               case MALI_PROFILING_EVENT_CHANNEL_GPU:
+                       seq_printf(seq_file, "GPU | ");
+                       break;
+               default:
+                       seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+                       break;
+               }
+
+               if (MALI_EVENT_ID_IS_HW(event_id)) {
+                       if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+                               switch (event_id & 0x0000FFFF) {
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+                                       seq_printf(seq_file, "PHYSICAL | ");
+                                       break;
+                               case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+                                       seq_printf(seq_file, "VIRTUAL | ");
+                                       break;
+                               default:
+                                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                                       break;
+                               }
+                       } else {
+                               seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+                       }
+               } else {
+                       seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+               }
+
+               seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+               return 0;
+       }
+
+       return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+       .start = profiling_events_start,
+       .next  = profiling_events_next,
+       .stop  = profiling_events_stop,
+       .show  = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+       .owner = THIS_MODULE,
+       .open = profiling_events_human_readable_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+#endif
+
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
+{
+#ifdef MALI_MEM_SWAP_TRACKING
+       seq_printf(s, "  %-25s  %-10s %-25s %-10s  %-15s  %-15s  %-10s  %-10s %-10s\n"\
+                  "=======================================================================================================================================\n",
+                  "Name (:bytes)", "pid", "pid-name", "mali_mem", "max_mali_mem",
+                  "external_mem", "ump_mem", "dma_mem", "swap_mem");
+#else
+       seq_printf(s, "  %-25s  %-10s %-25s %-10s  %-15s  %-15s  %-10s  %-10s\n"\
+                  "=======================================================================================================================================\n",
+                  "Name (:bytes)", "pid", "pid-name", "mali_mem", "max_mali_mem",
+                  "external_mem", "ump_mem", "dma_mem");
+#endif
+       mali_session_memory_tracking(s);
+       return 0;
+}
+
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, memory_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations memory_usage_fops = {
+       .owner = THIS_MODULE,
+       .open = memory_debugfs_open,
+       .read  = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
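+
+/*
+ * Example (sketch): the per-session memory table can be read with
+ *
+ *   cat /sys/kernel/debug/mali/gpu_memory
+ */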
+
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       size_t r;
+       u32 uval = _mali_ukk_utilization_gp_pp();
+
+       r = snprintf(buf, 64, "%u\n", uval);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       size_t r;
+       u32 uval = _mali_ukk_utilization_gp();
+
+       r = snprintf(buf, 64, "%u\n", uval);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       size_t r;
+       u32 uval = _mali_ukk_utilization_pp();
+
+       r = snprintf(buf, 64, "%u\n", uval);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+
+static const struct file_operations utilization_gp_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+       .owner = THIS_MODULE,
+       .read = utilization_pp_read,
+};
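+
+/*
+ * Reading example (sketch): the files expose the driver's fixed-point
+ * utilization value, where 256 corresponds to fully busy in this
+ * driver's convention:
+ *
+ *   cat /sys/kernel/debug/mali/utilization_gp_pp   # e.g. 128 at ~50% load
+ */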
+
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+       _mali_uk_user_setting_t setting;
+       char buf[32];
+
+       cnt = min(cnt, sizeof(buf) - 1);
+       if (copy_from_user(buf, ubuf, cnt)) {
+               return -EFAULT;
+       }
+       buf[cnt] = '\0';
+
+       ret = kstrtoul(buf, 10, &val);
+       if (0 != ret) {
+               return ret;
+       }
+
+       /* Update setting */
+       setting = (_mali_uk_user_setting_t)(filp->private_data);
+       mali_set_user_setting(setting, val);
+
+       *ppos += cnt;
+       return cnt;
+}
+
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       size_t r;
+       u32 value;
+       _mali_uk_user_setting_t setting;
+
+       setting = (_mali_uk_user_setting_t)(filp->private_data);
+       value = mali_get_user_setting(setting);
+
+       r = snprintf(buf, 64, "%u\n", value);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+       .owner = THIS_MODULE,
+       .open = open_copy_private_data,
+       .read = user_settings_read,
+       .write = user_settings_write,
+};
+
+static int mali_sysfs_user_settings_register(void)
+{
+       struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+       if (mali_user_settings_dir != NULL) {
+               long i;
+               for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
+                       debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+                                           0600, mali_user_settings_dir, (void *)i,
+                                           &user_settings_fops);
+               }
+       }
+
+       return 0;
+}
+
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = kstrtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+       if (ret) {
+               return ret;
+       }
+
+       *offp += count;
+       return count;
+}
+
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r;
+       char buffer[64];
+
+       r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_enabled_fops = {
+       .owner = THIS_MODULE,
+       .write = pp_num_cores_enabled_write,
+       .read = pp_num_cores_enabled_read,
+       .llseek = default_llseek,
+};
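+
+/*
+ * Example (sketch): cap the number of enabled PP cores at runtime,
+ * overriding core scaling as noted above:
+ *
+ *   echo 2 > /sys/kernel/debug/mali/pp/num_cores_enabled
+ *   cat /sys/kernel/debug/mali/pp/num_cores_enabled   # prints 2
+ */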
+
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r;
+       char buffer[64];
+
+       r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_total_fops = {
+       .owner = THIS_MODULE,
+       .read = pp_num_cores_total_read,
+};
+
+static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+       int ret;
+       char buffer[32];
+       unsigned long val;
+
+       if (count >= sizeof(buffer)) {
+               return -ENOMEM;
+       }
+
+       if (copy_from_user(&buffer[0], buf, count)) {
+               return -EFAULT;
+       }
+       buffer[count] = '\0';
+
+       ret = kstrtoul(&buffer[0], 10, &val);
+       if (0 != ret) {
+               return -EINVAL;
+       }
+
+       switch (val) {
+       case 1:
+               mali_executor_core_scaling_enable();
+               break;
+       case 0:
+               mali_executor_core_scaling_disable();
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *offp += count;
+       return count;
+}
+
+static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+}
+static const struct file_operations pp_core_scaling_enabled_fops = {
+       .owner = THIS_MODULE,
+       .write = pp_core_scaling_enabled_write,
+       .read = pp_core_scaling_enabled_read,
+       .llseek = default_llseek,
+};
+
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+       int r = 0;
+       char buffer[64];
+
+       switch (mali_kernel_core_get_product_id()) {
+       case _MALI_PRODUCT_ID_MALI200:
+               r = snprintf(buffer, 64, "Mali-200\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI300:
+               r = snprintf(buffer, 64, "Mali-300\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI400:
+               r = snprintf(buffer, 64, "Mali-400 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI450:
+               r = snprintf(buffer, 64, "Mali-450 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI470:
+               r = snprintf(buffer, 64, "Mali-470 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_UNKNOWN:
+               return -EINVAL;
+       }
+
+       return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations version_fops = {
+       .owner = THIS_MODULE,
+       .read = version_read,
+};
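+
+/*
+ * Example output (sketch) on a Mali-450 system:
+ *
+ *   $ cat /sys/kernel/debug/mali/version
+ *   Mali-450 MP
+ */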
+
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+       struct mali_session_data *session, *tmp;
+       u32 session_seq = 1;
+
+       seq_printf(s, "timeline system info:\n=================\n\n");
+
+       mali_session_lock();
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+               mali_timeline_debug_print_system(session->timeline_system, s);
+               seq_printf(s, "session %d end\n\n\n", session_seq++);
+       }
+       mali_session_unlock();
+
+       return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+       .owner = THIS_MODULE,
+       .open = timeline_debugfs_open,
+       .read  = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release
+};
+#endif
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+       if (ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+               /* Debugfs not supported. */
+               mali_debugfs_dir = NULL;
+       } else {
+               if (NULL != mali_debugfs_dir) {
+                       /* Debugfs directory created successfully; create files now */
+                       struct dentry *mali_power_dir;
+                       struct dentry *mali_gp_dir;
+                       struct dentry *mali_pp_dir;
+                       struct dentry *mali_l2_dir;
+                       struct dentry *mali_profiling_dir;
+
+                       debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+                       mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+                       if (mali_power_dir != NULL) {
+                               debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
+                               debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops);
+                       }
+
+                       mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+                       if (mali_gp_dir != NULL) {
+                               u32 num_groups;
+                               long i;
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++) {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+                                       if (NULL != gp_core) {
+                                               struct dentry *mali_gp_gpx_dir;
+                                               mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+                                               if (NULL != mali_gp_gpx_dir) {
+                                                       debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+                                                       debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+                                               }
+                                               break; /* no need to look for any other GP cores */
+                                       }
+
+                               }
+                       }
+
+                       mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+                       if (mali_pp_dir != NULL) {
+                               u32 num_groups;
+                               long i;
+
+                               debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+                               debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+                               debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops);
+
+                               num_groups = mali_group_get_glob_num_groups();
+                               for (i = 0; i < num_groups; i++) {
+                                       struct mali_group *group = mali_group_get_glob_group(i);
+
+                                       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+                                       if (NULL != pp_core) {
+                                               char buf[16];
+                                               struct dentry *mali_pp_ppx_dir;
+                                               _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+                                               mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+                                               if (NULL != mali_pp_ppx_dir) {
+                                                       debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+                                                       if (!mali_group_is_virtual(group)) {
+                                                               debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+
+                       mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+                       if (mali_l2_dir != NULL) {
+                               struct dentry *mali_l2_all_dir;
+                               u32 l2_id;
+                               struct mali_l2_cache_core *l2_cache;
+
+                               mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+                               if (mali_l2_all_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+                                       debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+                               }
+
+                               l2_id = 0;
+                               l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               while (NULL != l2_cache) {
+                                       char buf[16];
+                                       struct dentry *mali_l2_l2x_dir;
+                                       _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+                                       mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+                                       if (NULL != mali_l2_l2x_dir) {
+                                               debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+                                               debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+                                               debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+                                               debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
+                                               debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+                                       }
+
+                                       /* try next L2 */
+                                       l2_id++;
+                                       l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+                               }
+                       }
+
+                       debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+                       debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+                       debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+                       debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+                       mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+                       if (mali_profiling_dir != NULL) {
+                               u32 max_sub_jobs;
+                               long i;
+                               struct dentry *mali_profiling_gp_dir;
+                               struct dentry *mali_profiling_pp_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                               struct dentry *mali_profiling_proc_dir;
+#endif
+                               /*
+                                * Create directory where we can set GP HW counters.
+                                */
+                               mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
+                               if (mali_profiling_gp_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+                               }
+
+                               /*
+                                * Create directory where we can set PP HW counters.
+                                * Possible override with specific HW counters for a particular sub job
+                                * (Disable core scaling before using the override!)
+                                */
+                               mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
+                               if (mali_profiling_pp_dir != NULL) {
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+                               }
+
+                               max_sub_jobs = mali_executor_get_num_cores_total();
+                               for (i = 0; i < max_sub_jobs; i++) {
+                                       char buf[16];
+                                       struct dentry *mali_profiling_pp_x_dir;
+                                       _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
+                                       mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
+                                       if (NULL != mali_profiling_pp_x_dir) {
+                                               debugfs_create_file("counter_src0",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+                                                                   &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src1",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+                                                                   &profiling_counter_src_fops);
+                                       }
+                               }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+                               mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+                               if (mali_profiling_proc_dir != NULL) {
+                                       struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+                                       if (mali_profiling_proc_default_dir != NULL) {
+                                               debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+                                       }
+                               }
+                               debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+                               debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+                               debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+#endif
+                       }
+
+#if MALI_STATE_TRACKING
+                       debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+#if defined(DEBUG)
+                       debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
+                       if (mali_sysfs_user_settings_register()) {
+                               /* Failed to create the debugfs entries for the user settings DB. */
+                               MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+                       }
+               }
+       }
+
+       /* Success! */
+       return 0;
+}
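+
+/*
+ * Resulting debugfs tree (sketch, assuming the default "mali" name, one
+ * GP group and pp0..ppN groups, as registered above):
+ *
+ *   /sys/kernel/debug/mali/
+ *   |-- version, gpu_memory, utilization_{gp_pp,gp,pp}
+ *   |-- power/{always_on,power_events}
+ *   |-- gp/gp0/{base_addr,enabled}
+ *   |-- pp/{num_cores_total,num_cores_enabled,core_scaling_enabled}
+ *   |-- pp/ppX/{base_addr,enabled}
+ *   |-- l2/all/counter_src{0,1}, l2/l2X/{counter_src*,counter_val*,base_addr}
+ *   |-- profiling/{gp,pp,proc,record,events,events_human_readable}
+ *   `-- userspace_settings/<one file per user setting>
+ */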
+
+int mali_sysfs_unregister(void)
+{
+       if (NULL != mali_debugfs_dir) {
+               debugfs_remove_recursive(mali_debugfs_dir);
+       }
+       return 0;
+}
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+       return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+       return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/utgard/r8p0/linux/mali_kernel_sysfs.h b/utgard/r8p0/linux/mali_kernel_sysfs.h
new file mode 100755 (executable)
index 0000000..9dad7f2
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/utgard/r8p0/linux/mali_linux_trace.h b/utgard/r8p0/linux/mali_linux_trace.h
new file mode 100755 (executable)
index 0000000..08682a7
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef  TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#ifndef TRACEPOINTS_ENABLED
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#endif
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event, encoded as a bitfield.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+           TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+                    unsigned int d2, unsigned int d3, unsigned int d4),
+
+           TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int, event_id)
+                   __field(unsigned int, d0)
+                   __field(unsigned int, d1)
+                   __field(unsigned int, d2)
+                   __field(unsigned int, d3)
+                   __field(unsigned int, d4)
+           ),
+
+           TP_fast_assign(
+                   __entry->event_id = event_id;
+                   __entry->d0 = d0;
+                   __entry->d1 = d1;
+                   __entry->d2 = d2;
+                   __entry->d3 = d3;
+                   __entry->d4 = d4;
+           ),
+
+           TP_printk("event=%d", __entry->event_id)
+          );
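+/*
+ * Note (sketch): TRACE_EVENT(mali_timeline_event, ...) generates a
+ * trace_mali_timeline_event() helper that the driver calls to emit the
+ * event, e.g.
+ *
+ *   trace_mali_timeline_event(event_id, d0, d1, d2, d3, d4);
+ *
+ * The same trace_<name>() convention applies to the other TRACE_EVENTs
+ * in this file.
+ */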
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+           TP_PROTO(unsigned int counter_id, unsigned int value),
+
+           TP_ARGS(counter_id, value),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int, counter_id)
+                   __field(unsigned int, value)
+           ),
+
+           TP_fast_assign(
+                   __entry->counter_id = counter_id;
+                   __entry->value = value;
+           ),
+
+           TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+          );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param pid The process id that emitted the counters.
+ * @param tid The thread id that emitted the counters.
+ * @param surface_id The surface the counters belong to.
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+           TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
+
+           TP_ARGS(pid, tid, surface_id, counters),
+
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(pid_t, tid)
+                   __field(void *, surface_id)
+                   __field(unsigned int *, counters)
+           ),
+
+           TP_fast_assign(
+                   __entry->pid = pid;
+                   __entry->tid = tid;
+                   __entry->surface_id = surface_id;
+                   __entry->counters = counters;
+           ),
+
+           TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+          );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id from which the core activity originates
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+           TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+           TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(unsigned int, active)
+                   __field(unsigned int, core_type)
+                   __field(unsigned int, core_id)
+                   __field(unsigned int, frame_builder_id)
+                   __field(unsigned int, flush_id)
+           ),
+
+           TP_fast_assign(
+                   __entry->pid = pid;
+                   __entry->active = active;
+                   __entry->core_type = core_type;
+                   __entry->core_id = core_id;
+                   __entry->frame_builder_id = frame_builder_id;
+                   __entry->flush_id = flush_id;
+           ),
+
+           TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+          );
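+/*
+ * The TP_printk format above follows the systrace convention: "S" starts
+ * and "F" finishes an activity slice, followed by the pid, the core name
+ * ("GP0", "PP1", ...), the flush id and the frame builder id, producing
+ * strings such as "S|1234|GP0:2a|7".
+ */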
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
+
diff --git a/utgard/r8p0/linux/mali_memory.c b/utgard/r8p0/linux/mali_memory.c
new file mode 100755 (executable)
index 0000000..f159514
--- /dev/null
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_executor.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_util.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_swap_alloc.h"
+#include "mali_memory_defer_bind.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_secure.h"
+#endif
+
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
+
+#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
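+/* Number of pages to map per CPU page fault in the COW mapping path (see
+ * mali_mem_vma_fault); prefetching a few pages amortizes the fault cost
+ * for sequential CPU accesses. */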
+
+static void mali_mem_vma_open(struct vm_area_struct *vma)
+{
+       mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+       MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+       /* If need to share the allocation, add ref_count here */
+       mali_allocation_ref(alloc);
+       return;
+}
+static void mali_mem_vma_close(struct vm_area_struct *vma)
+{
+       /* If need to share the allocation, unref ref_count here */
+       mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+
+       if (NULL != alloc) {
+               struct file *filp = NULL;
+               struct mali_session_data *session = NULL;
+
+               filp = vma->vm_file;
+               MALI_DEBUG_ASSERT(filp);
+               session = (struct mali_session_data *)filp->private_data;
+               MALI_DEBUG_ASSERT(session);
+
+               mali_session_memory_lock(session);
+               vma->vm_private_data = NULL;
+               alloc->cpu_mapping.vma = NULL;
+               mali_session_memory_unlock(session);
+
+               mali_allocation_unref(&alloc);
+       }
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static int mali_mem_vma_fault(struct vm_fault *vmf)
+#else
+static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#endif
+{
+       struct file *filp = NULL;
+       struct mali_session_data *session = NULL;
+       mali_mem_allocation *alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       int ret;
+       int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+       struct vm_area_struct *vma = vmf->vma;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+       unsigned long address = (unsigned long)vmf->address;
+#else
+       unsigned long address = (unsigned long)vmf->virtual_address;
+#endif
+       filp = vma->vm_file;
+       MALI_DEBUG_ASSERT(filp);
+       session = (struct mali_session_data *)filp->private_data;
+       MALI_DEBUG_ASSERT(session);
+       mali_session_memory_lock(session);
+       if (NULL == vma->vm_private_data) {
+               MALI_DEBUG_PRINT(1, ("mali_vma_fault: The memory has been freed!\n"));
+               mali_session_memory_unlock(session);
+               return VM_FAULT_SIGBUS;
+       } else {
+               alloc = (mali_mem_allocation *)vma->vm_private_data;
+               MALI_DEBUG_ASSERT(alloc->backend_handle);
+               MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma == vma);
+               MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);
+               mali_allocation_ref(alloc);
+       }
+       mali_session_memory_unlock(session);
+
+
+       /* Get backend memory & Map on CPU */
+       mutex_lock(&mali_idr_mutex);
+       if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+               MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+               mutex_unlock(&mali_idr_mutex);
+               mali_allocation_unref(&alloc);
+               return VM_FAULT_SIGBUS;
+       }
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);
+
+       if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
+                       (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
+           (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
+               /*check if use page fault to do COW*/
+               MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%x\n", address));
+               mutex_lock(&mem_bkend->mutex);
+               ret = mali_mem_cow_allocate_on_demand(mem_bkend,
+                                                     (address - vma->vm_start) / PAGE_SIZE);
+               mutex_unlock(&mem_bkend->mutex);
+
+               if (ret != _MALI_OSK_ERR_OK) {
+                       mali_allocation_unref(&alloc);
+                       return VM_FAULT_OOM;
+               }
+               prefetch_num = 1;
+
+               /* Handle the CPU mapping of the COW-modified range: the
+                * mapping was zapped in cow_modify_range, so a CPU access
+                * triggers a page fault and we map the pages back in here. */
+               mutex_lock(&mem_bkend->mutex);
+               ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
+               mutex_unlock(&mem_bkend->mutex);
+
+               if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+                       mali_allocation_unref(&alloc);
+                       return VM_FAULT_SIGBUS;
+               }
+       } else if ((mem_bkend->type == MALI_MEM_SWAP) ||
+                  (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+               u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
+               int ret = _MALI_OSK_ERR_OK;
+
+               mutex_lock(&mem_bkend->mutex);
+               if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
+                       ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+               } else {
+                       ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+               }
+               mutex_unlock(&mem_bkend->mutex);
+
+               if (ret != _MALI_OSK_ERR_OK) {
+                       MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%x\n", address));
+                       mali_allocation_unref(&alloc);
+                       return VM_FAULT_OOM;
+               } else {
+                       mali_allocation_unref(&alloc);
+                       return VM_FAULT_LOCKED;
+               }
+       } else {
+               MALI_PRINT_ERROR(("Mali vma fault! It never happen, indicating some logic errors in caller.\n"));
+               mali_allocation_unref(&alloc);
+               /*NOT support yet or OOM*/
+               return VM_FAULT_OOM;
+       }
+
+       mali_allocation_unref(&alloc);
+       return VM_FAULT_NOPAGE;
+}
+
+static struct vm_operations_struct mali_kernel_vm_ops = {
+       .open = mali_mem_vma_open,
+       .close = mali_mem_vma_close,
+       .fault = mali_mem_vma_fault,
+};
+
+
+/** @brief Map a Mali allocation to a CPU address
+*
+* Supported backend types (see the dispatch at the end of the function):
+* - MALI_MEM_OS
+* - MALI_MEM_COW
+* - MALI_MEM_BLOCK
+* - MALI_MEM_SWAP (mapped lazily by the page fault handler)
+* - MALI_MEM_SECURE (only with CONFIG_DMA_SHARED_BUFFER)
+*
+* Not supported backend types:
+* - _MALI_MEMORY_BIND_BACKEND_UMP
+* - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+* - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+*/
+int mali_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct mali_session_data *session;
+       mali_mem_allocation *mali_alloc = NULL;
+       u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       int ret = -EFAULT;
+
+       session = (struct mali_session_data *)filp->private_data;
+       if (NULL == session) {
+               MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+               return -EFAULT;
+       }
+
+       MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
+                            (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+                            (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+       /* Operations used on any memory system */
+       /* nothing needs to be done in vm open/close for now */
+
+       /* find the mali allocation structure by virtual address */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+       if (likely(mali_vma_node)) {
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+               if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
+                       /* only allow to use start address for mmap */
+                       MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
+                       return -EFAULT;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(NULL == mali_vma_node);
+               return -EFAULT;
+       }
+
+       mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+       if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
+               MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n"));
+               return -EFAULT;
+       }
+
+       /* Get backend memory & Map on CPU */
+       mutex_lock(&mali_idr_mutex);
+       if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
+               MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+               mutex_unlock(&mali_idr_mutex);
+               return -EFAULT;
+       }
+       mutex_unlock(&mali_idr_mutex);
+
+       if ((vma->vm_start + mem_bkend->size) > vma->vm_end) {
+               MALI_PRINT_ERROR(("mali_mmap: out of memory mapping map_size %d, physical_size %d\n",  vma->vm_end - vma->vm_start, mem_bkend->size));
+               return -EFAULT;
+       }
+
+       if (!(MALI_MEM_SWAP == mali_alloc->type ||
+             (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+               /* Set some bits which indicate that the memory is IO memory, meaning
+                * that no paging is to be performed and the memory should not be
+                * included in crash dumps. And that the memory is reserved, meaning
+                * that it's present and can never be paged out (see also previous
+                * entry)
+                */
+               vma->vm_flags |= VM_IO;
+               vma->vm_flags |= VM_DONTCOPY;
+               vma->vm_flags |= VM_PFNMAP;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+               vma->vm_flags |= VM_RESERVED;
+#else
+               vma->vm_flags |= VM_DONTDUMP;
+               vma->vm_flags |= VM_DONTEXPAND;
+#endif
+       } else if (MALI_MEM_SWAP == mali_alloc->type) {
+               vma->vm_pgoff = mem_bkend->start_idx;
+       }
+
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       vma->vm_ops = &mali_kernel_vm_ops;
+
+       mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+       /* If it's a copy-on-write mapping, map it read-only */
+       if (!(vma->vm_flags & VM_WRITE)) {
+               MALI_DEBUG_PRINT(4, ("mmap allocation read-only!\n"));
+               /* add VM_WRITE so that do_page_fault accepts the later write fault */
+               vma->vm_flags |= VM_WRITE | VM_READ;
+               vma->vm_page_prot = PAGE_READONLY;
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+               mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
+               goto out;
+       }
+
+       if (mem_bkend->type == MALI_MEM_OS) {
+               ret = mali_mem_os_cpu_map(mem_bkend, vma);
+       } else if (mem_bkend->type == MALI_MEM_COW &&
+                  (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+               ret = mali_mem_cow_cpu_map(mem_bkend, vma);
+       } else if (mem_bkend->type == MALI_MEM_BLOCK) {
+               ret = mali_mem_block_cpu_map(mem_bkend, vma);
+       } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
+                       (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+               /* For swappable memory, the CPU page tables are created by the page fault handler. */
+               ret = 0;
+       } else if (mem_bkend->type == MALI_MEM_SECURE) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               ret = mali_mem_secure_cpu_map(mem_bkend, vma);
+#else
+               MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n"));
+               return -EFAULT;
+#endif
+       } else {
+               /* Not supported yet */
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory!\n"));
+               return -EFAULT;
+       }
+
+       if (ret != 0) {
+               MALI_DEBUG_PRINT(1, ("ret != 0\n"));
+               return -EFAULT;
+       }
+out:
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);
+
+       vma->vm_private_data = (void *)mali_alloc;
+       mali_alloc->cpu_mapping.vma = vma;
+
+       mali_allocation_ref(mali_alloc);
+
+       return 0;
+}
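+/*
+ * Userspace view (editorial sketch): the offset passed to mmap() encodes
+ * the Mali virtual address of the allocation (vm_pgoff << PAGE_SHIFT
+ * above), so a CPU mapping is obtained roughly like
+ *
+ *   void *cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                        MAP_SHARED, mali_fd, mali_addr);
+ *
+ * where mali_fd is the opened Mali device node and mali_addr is the GPU
+ * virtual address returned by the allocator.
+ */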
+
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
+{
+       u32 size = descriptor->psize;
+       struct mali_session_data *session = descriptor->session;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       /* Reserve Mali page tables for this allocation in the session's page directory */
+
+       if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               size += MALI_MMU_PAGE_SIZE;
+       }
+
+       return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
+}
+
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
+{
+       u32 old_size = descriptor->psize;
+       struct mali_session_data *session = descriptor->session;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+       if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               new_size  += MALI_MMU_PAGE_SIZE;
+       }
+
+       if (new_size > old_size) {
+               MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
+               return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
+{
+       if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               size += MALI_MMU_PAGE_SIZE;
+       }
+
+       /* Unmap and flush L2 */
+       mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
+       mali_executor_zap_all_active(session);
+}
+
+u32 _mali_ukk_report_memory_usage(void)
+{
+       u32 sum = 0;
+
+       if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+               sum += mali_mem_block_allocator_stat();
+       }
+
+       sum += mali_mem_os_stat();
+
+       return sum;
+}
+
+u32 _mali_ukk_report_total_memory_size(void)
+{
+       return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
+{
+       MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+       session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                                   _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+
+       if (NULL == session_data->memory_lock) {
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
+       if (NULL == session_data->cow_lock) {
+               _mali_osk_mutex_term(session_data->memory_lock);
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       mali_memory_manager_init(&session_data->allocation_mgr);
+
+       MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+       MALI_SUCCESS;
+}
+
+void mali_memory_session_end(struct mali_session_data *session)
+{
+       MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+       if (NULL == session) {
+               MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+               return;
+       }
+       /* free allocation */
+       mali_free_session_allocations(session);
+       /* sanity checks during uninit */
+       mali_memory_manager_uninit(&session->allocation_mgr);
+
+       /* Free the lock */
+       _mali_osk_mutex_term(session->memory_lock);
+       _mali_osk_mutex_term(session->cow_lock);
+       return;
+}
+
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+       _mali_osk_errcode_t err;
+
+       idr_init(&mali_backend_idr);
+       mutex_init(&mali_idr_mutex);
+
+       err = mali_mem_swap_init();
+       if (err != _MALI_OSK_ERR_OK) {
+               return err;
+       }
+       err = mali_mem_os_init();
+       if (_MALI_OSK_ERR_OK == err) {
+               err = mali_mem_defer_bind_manager_init();
+       }
+
+       return err;
+}
+
+void mali_memory_terminate(void)
+{
+       mali_mem_swap_term();
+       mali_mem_defer_bind_manager_destory();
+       mali_mem_os_term();
+       if (mali_memory_have_dedicated_memory()) {
+               mali_mem_block_allocator_destroy();
+       }
+}
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
+{
+       mali_page_node *page_node = NULL;
+
+       page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
+       MALI_DEBUG_ASSERT(NULL != page_node);
+
+       if (page_node) {
+               page_node->type = type;
+               INIT_LIST_HEAD(&page_node->list);
+       }
+
+       return page_node;
+}
+
+void _mali_page_node_ref(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* add ref to this page */
+               get_page(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               mali_mem_block_add_ref(node);
+       } else if (node->type == MALI_PAGE_NODE_SWAP) {
+               atomic_inc(&node->swap_it->ref_count);
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+       }
+}
+
+void _mali_page_node_unref(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* unref to this page */
+               put_page(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               mali_mem_block_dec_ref(node);
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+       }
+}
+
+
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
+{
+       MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
+       node->page = page;
+}
+
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
+{
+       MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
+       node->swap_it = item;
+}
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
+{
+       MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
+       node->blk_it = item;
+}
+
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* get ref count of this page */
+               return page_count(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               return mali_mem_block_get_ref_count(node);
+       } else if (node->type == MALI_PAGE_NODE_SWAP) {
+               return atomic_read(&node->swap_it->ref_count);
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+       }
+       return -1;
+}
+
+
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
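+               /* For OS pages the DMA address is stashed in page_private()
+                * by the OS memory allocator when the page is dma-mapped. */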
+               return page_private(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               return _mali_blk_item_get_phy_addr(node->blk_it);
+       } else if (node->type == MALI_PAGE_NODE_SWAP) {
+               return node->swap_it->dma_addr;
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+       }
+       return 0;
+}
+
+
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               return page_to_pfn(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               /* get phy addr for BLOCK page*/
+               return _mali_blk_item_get_pfn(node->blk_it);
+       } else if (node->type == MALI_PAGE_NODE_SWAP) {
+               return page_to_pfn(node->swap_it->page);
+       } else {
+               MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+       }
+       return 0;
+}
+
+
diff --git a/utgard/r8p0/linux/mali_memory.h b/utgard/r8p0/linux/mali_memory.h
new file mode 100755 (executable)
index 0000000..e5e4f66
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+
+_mali_osk_errcode_t mali_memory_initialize(void);
+void mali_memory_terminate(void);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
+{
+       return mali_mem_os_get_table_page(table_page, mapping);
+}
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param phys the physical (GPU-visible) address of the page to release
+ * @param virt the CPU virtual mapping of the page
+ */
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
+{
+       mali_mem_os_release_table_page(phys, virt);
+}
+
+/** @brief mmap function
+ *
+ * mmap syscalls on the Mali device node will end up here.
+ *
+ * This function allocates Mali memory and maps it on CPU and Mali.
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/** @brief Start a new memory session
+ *
+ * Called when a process opens the Mali device node.
+ *
+ * @param session Pointer to session to initialize
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session);
+
+/** @brief Close a memory session
+ *
+ * Called when a process closes the Mali device node.
+ *
+ * Memory allocated by the session will be freed
+ *
+ * @param session Pointer to the session to terminate
+ */
+void mali_memory_session_end(struct mali_session_data *session);
+
+/** @brief Prepare Mali page tables for mapping
+ *
+ * This function will prepare the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor for the mapping
+ */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
+
+/** @brief Resize Mali page tables for mapping
+ *
+ * This function will resize the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor for the mapping
+ * @param new_size The new size of descriptor
+ */
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size);
+
+/** @brief Free Mali page tables for mapping
+ *
+ * This function will unmap pages from Mali memory and free the page tables
+ * that are now unused.
+ *
+ * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary.
+ *
+ * @param descriptor Pointer to the memory descriptor to unmap
+ */
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags);
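+/*
+ * Typical call order (editorial sketch): the allocation path first calls
+ * mali_mem_mali_map_prepare() to reserve the page tables, a backend map
+ * helper (e.g. mali_mem_block_mali_map()) then fills them through
+ * mali_mmu_pagedir_update(), and teardown goes through
+ * mali_mem_mali_map_free().
+ */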
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type);
+
+void _mali_page_node_ref(struct mali_page_node *node);
+void _mali_page_node_unref(struct mali_page_node *node);
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page);
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item);
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item);
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node);
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node);
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_block_alloc.c b/utgard/r8p0/linux/mali_memory_block_alloc.c
new file mode 100755 (executable)
index 0000000..af3dd4d
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+
+
+static mali_block_allocator *mali_mem_block_global_allocator = NULL;
+
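+/*
+ * Layout note (sketch): block items are MALI_BLOCK_SIZE (4 kB) aligned, so
+ * the low 12 bits of phy_addr are always zero and are reused to store the
+ * per-block reference count:
+ *
+ *   phy_addr = (block physical address & ~MALI_BLOCK_REF_MASK) | ref_count
+ *
+ * which is why the accessors below mask with MALI_BLOCK_REF_MASK, and why
+ * add/dec simply increment or decrement phy_addr.
+ */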
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
+{
+       return (item->phy_addr & ~(MALI_BLOCK_REF_MASK));
+}
+
+
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
+{
+       return (item->phy_addr / MALI_BLOCK_SIZE);
+}
+
+
+u32 mali_mem_block_get_ref_count(mali_page_node *node)
+{
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
+}
+
+
+/* Increase the reference count.
+* Not atomic, so the caller must hold sp_lock.
+*/
+
+u32 mali_mem_block_add_ref(mali_page_node *node)
+{
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
+       return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
+}
+
+/* Decrease the reference count.
+* Not atomic, so the caller must hold sp_lock.
+*/
+u32 mali_mem_block_dec_ref(mali_page_node *node)
+{
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
+       return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
+}
+
+
+static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
+{
+       mali_block_allocator *info;
+       u32 usable_size;
+       u32 num_blocks;
+       mali_page_node *m_node;
+       mali_block_item *mali_blk_items = NULL;
+       int i = 0;
+
+       usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+       MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+       MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+       num_blocks = usable_size / MALI_BLOCK_SIZE;
+       MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+       if (usable_size == 0) {
+               MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+               return NULL;
+       }
+
+       info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
+       if (NULL != info) {
+               INIT_LIST_HEAD(&info->free);
+               spin_lock_init(&info->sp_lock);
+               info->total_num = num_blocks;
+               mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
+
+               if (mali_blk_items) {
+                       info->items = mali_blk_items;
+                       /* add blocks (4 kB each) to the free list */
+                       for (i = 0 ; i < num_blocks ; i++) {
+                               /* record block information */
+                               mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
+                               /* add to the free list */
+                               m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                               if (m_node == NULL)
+                                       goto fail;
+                               _mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
+                               list_add_tail(&m_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
+                       }
+                       return info;
+               }
+       }
+fail:
+       mali_mem_block_allocator_destroy();
+       return NULL;
+}
+
+void mali_mem_block_allocator_destroy(void)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_global_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
+       MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
+
+       if (NULL == info)
+               return;
+
+       list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               list_del(&m_page->list);
+               kfree(m_page);
+       }
+
+       _mali_osk_free(info->items);
+       _mali_osk_free(info);
+}
+
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
+{
+       mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_block_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
+       mutex_unlock(&mem_bkend->mutex);
+       return free_pages_nr;
+}
+
+
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+       mali_block_allocator *info = mali_mem_block_global_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
+
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
+       /* do some init */
+       INIT_LIST_HEAD(&block_mem->pfns);
+
+       spin_lock(&info->sp_lock);
+       /* check if there is enough space */
+       if (atomic_read(&info->free_num) > page_count) {
+               list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+                       if (page_count > 0) {
+                               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+                               MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0);
+                               list_move(&m_page->list, &block_mem->pfns);
+                               block_mem->count++;
+                               atomic_dec(&info->free_num);
+                               _mali_page_node_ref(m_page);
+                       } else {
+                               break;
+                       }
+                       page_count--;
+               }
+       } else {
+               /* can't allocate from BLOCK memory*/
+               spin_unlock(&info->sp_lock);
+               return -1;
+       }
+
+       spin_unlock(&info->sp_lock);
+       return 0;
+}
+
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
+{
+       u32 free_pages_nr = 0;
+
+       free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+       block_mem->count = 0;
+       MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
+
+       return free_pages_nr;
+}
+
+
+u32 mali_mem_block_free_list(struct list_head *list)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_global_allocator;
+       u32 free_pages_nr = 0;
+
+       if (info) {
+               spin_lock(&info->sp_lock);
+               list_for_each_entry_safe(m_page, m_tmp , list, list) {
+                       if (1 == _mali_page_node_get_ref_count(m_page)) {
+                               free_pages_nr++;
+                       }
+                       mali_mem_block_free_node(m_page);
+               }
+               spin_unlock(&info->sp_lock);
+       }
+       return free_pages_nr;
+}
+
+/* free the node */
+void mali_mem_block_free_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_global_allocator;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /*Need to make this atomic?*/
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /*Move to free list*/
+                       _mali_page_node_unref(node);
+                       list_move_tail(&node->list, &info->free);
+                       atomic_add(1, &info->free_num);
+               } else {
+                       _mali_page_node_unref(node);
+                       list_del(&node->list);
+                       kfree(node);
+               }
+       }
+}
+
+/* unref the node, but not free it */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_global_allocator;
+       mali_page_node *new_node;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /*Need to make this atomic?*/
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /* allocate a new node, add it to the free list, and keep the old node */
+                       _mali_page_node_unref(node);
+                       new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                       if (new_node) {
+                               memcpy(new_node, node, sizeof(mali_page_node));
+                               list_add(&new_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
+                       } else
+                               return _MALI_OSK_ERR_FAULT;
+
+               } else {
+                       _mali_page_node_unref(node);
+               }
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct mali_page_node *m_page;
+       dma_addr_t phys;
+       u32 virt = vaddr;
+       u32 prop = props;
+
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+               /* Verify that the "physical" address is 32-bit and
+                * usable for Mali, when on a system with bus addresses
+                * wider than 32-bit. */
+               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+               virt += MALI_MMU_PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
+{
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+       int ret;
+       mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
+       unsigned long addr = vma->vm_start;
+       struct mali_page_node *m_page;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+               if (unlikely(0 != ret)) {
+                       return -EFAULT;
+               }
+               addr += _MALI_OSK_MALI_PAGE_SIZE;
+
+       }
+
+       return 0;
+}
+
+
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+       mali_block_allocator *allocator;
+
+       /* Do the low level linux operation first */
+
+       /* Request ownership of the memory */
+       if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
+               MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Create generic block allocator object to handle it */
+       allocator = mali_mem_block_allocator_create(start, size);
+
+       if (NULL == allocator) {
+               MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+               _mali_osk_mem_unreqregion(start, size);
+               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       }
+
+       mali_mem_block_global_allocator = (mali_block_allocator *)allocator;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+mali_bool mali_memory_have_dedicated_memory(void)
+{
+       return mali_mem_block_global_allocator ? MALI_TRUE : MALI_FALSE;
+}
+
+u32 mali_mem_block_allocator_stat(void)
+{
+       mali_block_allocator *allocator = mali_mem_block_global_allocator;
+       MALI_DEBUG_ASSERT_POINTER(allocator);
+
+
+       return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/utgard/r8p0/linux/mali_memory_block_alloc.h b/utgard/r8p0/linux/mali_memory_block_alloc.h
new file mode 100755 (executable)
index 0000000..3e11ef2
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+#include <linux/spinlock.h>
+
+#include "mali_memory_types.h"
+
+#define MALI_BLOCK_SIZE (PAGE_SIZE)  /* 4 kB, manage BLOCK memory as page size */
+#define MALI_BLOCK_REF_MASK (0xFFF)
+#define MALI_BLOCK_MAX_REF_COUNT (0xFFF)
+
+
+
+typedef struct mali_block_allocator {
+       /*
+       * In the free list each node's ref_count is 0; the ref_count is
+       * raised when the block is allocated or referenced in COW.
+       */
+       mali_block_item *items; /* information for each block item */
+       struct list_head free; /* free list of mali_memory_node */
+       spinlock_t sp_lock; /* lock for reference count & free list operations */
+       u32 total_num; /* Number of total pages*/
+       atomic_t free_num; /*number of free pages*/
+} mali_block_allocator;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item);
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item);
+u32 mali_mem_block_get_ref_count(mali_page_node *node);
+u32 mali_mem_block_add_ref(mali_page_node *node);
+u32 mali_mem_block_dec_ref(mali_page_node *node);
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend);
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size);
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+mali_bool mali_memory_have_dedicated_memory(void);
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem);
+u32 mali_mem_block_free_list(struct list_head *list);
+void mali_mem_block_free_node(struct mali_page_node *node);
+void mali_mem_block_allocator_destroy(void);
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node);
+u32 mali_mem_block_allocator_stat(void);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_cow.c b/utgard/r8p0/linux/mali_memory_cow.c
new file mode 100644 (file)
index 0000000..ae417db
--- /dev/null
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+/**
+* Allocate a page for the COW backend and flush the cache.
+*/
+static struct page *mali_mem_cow_alloc_page(void)
+{
+       mali_mem_os_mem os_mem;
+       struct mali_page_node *node;
+       struct page *new_page;
+
+       int ret = 0;
+       /* allocate pages from os mem */
+       ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);
+
+       if (ret) {
+               return NULL;
+       }
+
+       MALI_DEBUG_ASSERT(1 == os_mem.count);
+
+       node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
+       new_page = node->page;
+       node->page = NULL;
+       list_del(&node->list);
+       kfree(node);
+
+       return new_page;
+}
+
+
+static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
+               u32 target_offset,
+               u32 target_size)
+{
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
+                         MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);
+
+       if (MALI_MEM_OS == target_bk->type) {
+               MALI_DEBUG_ASSERT(&target_bk->os_mem);
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
+               return &target_bk->os_mem.pages;
+       } else if (MALI_MEM_COW == target_bk->type) {
+               MALI_DEBUG_ASSERT(&target_bk->cow_mem);
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
+               return  &target_bk->cow_mem.pages;
+       } else if (MALI_MEM_BLOCK == target_bk->type) {
+               MALI_DEBUG_ASSERT(&target_bk->block_mem);
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
+               return  &target_bk->block_mem.pfns;
+       } else if (MALI_MEM_SWAP == target_bk->type) {
+               MALI_DEBUG_ASSERT(&target_bk->swap_mem);
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
+               return  &target_bk->swap_mem.pages;
+       }
+
+       return NULL;
+}
+
+/**
+* Do COW for OS memory - also supports COW of memory allocated from bank memory.
+* range_start/range_size can be zero, which means cow_modify_range will be
+* called later.
+* This function allocates new pages from OS memory for the COW backend for the
+* modified range, and keeps the pages outside the modified range, taking a
+* reference on each of them.
+*
+* @target_bk - target allocation's backend (the allocation to be COWed)
+* @target_offset - offset in the target allocation to COW (4K aligned; allows COW of memory allocated from a memory bank)
+* @target_size - size of the target allocation to COW (for memory bank support)
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range
+*/
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+               u32 target_offset,
+               u32 target_size,
+               mali_mem_backend *backend,
+               u32 range_start,
+               u32 range_size)
+{
+       mali_mem_cow *cow = &backend->cow_mem;
+       struct mali_page_node *m_page, *m_tmp, *page_node;
+       int target_page = 0;
+       struct page *new_page;
+       struct list_head *pages = NULL;
+
+       pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+
+       if (NULL == pages) {
+               MALI_DEBUG_PRINT_ERROR(("No memory pages need to be COWed!\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT(0 == cow->count);
+
+       INIT_LIST_HEAD(&cow->pages);
+       mutex_lock(&target_bk->mutex);
+       list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+               /* add pages from (target_offset, target_offset + size) to the cow backend */
+               if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+                   (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+                       /* allocate a new page node, always use OS memory for COW */
+                       page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+
+                       if (NULL == page_node) {
+                               mutex_unlock(&target_bk->mutex);
+                               goto error;
+                       }
+
+                       INIT_LIST_HEAD(&page_node->list);
+
+                       /* check if in the modified range */
+                       if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+                           (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+                               /* need to allocate a new page */
+                               /* To simplify the case, all COW memory is allocated from OS memory */
+                               new_page = mali_mem_cow_alloc_page();
+
+                               if (NULL == new_page) {
+                                       kfree(page_node);
+                                       mutex_unlock(&target_bk->mutex);
+                                       goto error;
+                               }
+
+                               _mali_page_node_add_page(page_node, new_page);
+                       } else {
+                               /* Add block memory case */
+                               if (m_page->type != MALI_PAGE_NODE_BLOCK) {
+                                       _mali_page_node_add_page(page_node, m_page->page);
+                               } else {
+                                       page_node->type = MALI_PAGE_NODE_BLOCK;
+                                       _mali_page_node_add_block_item(page_node, m_page->blk_it);
+                               }
+
+                               /* add a ref to this page */
+                               _mali_page_node_ref(m_page);
+                       }
+
+                       /* add it to the COW backend page list */
+                       list_add_tail(&page_node->list, &cow->pages);
+                       cow->count++;
+               }
+               target_page++;
+       }
+       mutex_unlock(&target_bk->mutex);
+       return _MALI_OSK_ERR_OK;
+error:
+       mali_mem_cow_release(backend, MALI_FALSE);
+       return _MALI_OSK_ERR_FAULT;
+}
+\r
+_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,\r
+               u32 target_offset,\r
+               u32 target_size,\r
+               mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size)\r
+{\r
+       mali_mem_cow *cow = &backend->cow_mem;\r
+       struct mali_page_node *m_page, *m_tmp, *page_node;\r
+       int target_page = 0;\r
+       struct mali_swap_item *swap_item;\r
+       struct list_head *pages = NULL;\r
+\r
+       pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);\r
+       if (NULL == pages) {\r
+               MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n"));\r
+               return _MALI_OSK_ERR_FAULT;\r
+       }\r
+\r
+       MALI_DEBUG_ASSERT(0 == cow->count);\r
+\r
+       INIT_LIST_HEAD(&cow->pages);\r
+       mutex_lock(&target_bk->mutex);\r
+\r
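+       /* mark the backend as not swapped in yet; it must be swapped in before\r
+        * the GPU can use it */\r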
+       backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;\r
+\r
+       list_for_each_entry_safe(m_page, m_tmp, pages, list) {\r
+               /* add pages in [target_offset, target_offset + target_size) to the COW backend */\r
+               if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                   (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {\r
+\r
+                       /* allocate a new page node; swap-COWed backends always use swap memory */\r
+                       page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);\r
+\r
+                       if (NULL == page_node) {\r
+                               mutex_unlock(&target_bk->mutex);\r
+                               goto error;\r
+                       }\r
+\r
+                       /* check if this page is in the modified range */\r
+                       if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                           (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {\r
+                               /* inside the modified range: allocate a private swap item */\r
+                               swap_item = mali_mem_swap_alloc_swap_item();\r
+\r
+                               if (NULL == swap_item) {\r
+                                       kfree(page_node);\r
+                                       mutex_unlock(&target_bk->mutex);\r
+                                       goto error;\r
+                               }\r
+\r
+                               swap_item->idx = mali_mem_swap_idx_alloc();\r
+\r
+                               if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {\r
+                                       MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));\r
+                                       kfree(page_node);\r
+                                       kfree(swap_item);\r
+                                       mutex_unlock(&target_bk->mutex);\r
+                                       goto error;\r
+                               }\r
+\r
+                               _mali_page_node_add_swap_item(page_node, swap_item);\r
+                       } else {\r
+                               _mali_page_node_add_swap_item(page_node, m_page->swap_it);\r
+\r
+                               /* add ref to this page */\r
+                               _mali_page_node_ref(m_page);\r
+                       }\r
+\r
+                       list_add_tail(&page_node->list, &cow->pages);\r
+                       cow->count++;\r
+               }\r
+               target_page++;\r
+       }\r
+       mutex_unlock(&target_bk->mutex);\r
+\r
+       return _MALI_OSK_ERR_OK;\r
+error:\r
+       mali_mem_swap_release(backend, MALI_FALSE);\r
+       return _MALI_OSK_ERR_FAULT;\r
+\r
+}\r
+\r
+\r
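+/* Drop one reference on a page node, dispatching on its backing type\r
+ * (OS page, block item or swap item). */\r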
+_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)\r
+{\r
+       if (node->type == MALI_PAGE_NODE_OS) {\r
+               return mali_mem_os_put_page(node->page);\r
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {\r
+               return mali_mem_block_unref_node(node);\r
+       } else if (node->type == MALI_PAGE_NODE_SWAP) {\r
+               return _mali_mem_swap_put_page_node(node);\r
+       } else {\r
+               MALI_DEBUG_ASSERT(0);\r
+       }\r
+       return _MALI_OSK_ERR_FAULT;\r
+}\r
+\r
+\r
+/**\r
+* Modify a range of an existing COW backend\r
+* @backend - COW backend\r
+* @range_start - offset of the modified range (4K aligned)\r
+* @range_size - size of the modified range (in bytes)\r
+*/\r
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size)\r
+{\r
+       mali_mem_allocation *alloc = NULL;\r
+       struct mali_session_data *session;\r
+       mali_mem_cow *cow = &backend->cow_mem;\r
+       struct mali_page_node *m_page, *m_tmp;\r
+       LIST_HEAD(pages);\r
+       struct page *new_page;\r
+       u32 count = 0;\r
+       s32 change_pages_nr = 0;\r
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;\r
+\r
+       if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       alloc = backend->mali_allocation;\r
+       MALI_DEBUG_ASSERT_POINTER(alloc);\r
+\r
+       session = alloc->session;\r
+       MALI_DEBUG_ASSERT_POINTER(session);\r
+\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
+       MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);\r
+\r
+       mutex_lock(&backend->mutex);\r
+\r
+       /* replace the pages inside the modified range */\r
+       list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {\r
+\r
+               /* check if this page is in the modified range */\r
+               if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                   (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {\r
+                       if (MALI_PAGE_NODE_SWAP != m_page->type) {\r
+                               new_page = mali_mem_cow_alloc_page();\r
+\r
+                               if (NULL == new_page) {\r
+                                       goto error;\r
+                               }\r
+                               if (1 != _mali_page_node_get_ref_count(m_page))\r
+                                       change_pages_nr++;\r
+                               /* unref the old page */\r
+                               _mali_osk_mutex_wait(session->cow_lock);\r
+                               if (_mali_mem_put_page_node(m_page)) {\r
+                                       __free_page(new_page);\r
+                                       _mali_osk_mutex_signal(session->cow_lock);\r
+                                       goto error;\r
+                               }\r
+                               _mali_osk_mutex_signal(session->cow_lock);\r
+                               /* install the new page; COW always uses OS memory */\r
+                               m_page->type = MALI_PAGE_NODE_OS;\r
+                               _mali_page_node_add_page(m_page, new_page);\r
+                       } else {\r
+                               struct mali_swap_item *swap_item;\r
+\r
+                               swap_item = mali_mem_swap_alloc_swap_item();\r
+\r
+                               if (NULL == swap_item) {\r
+                                       goto error;\r
+                               }\r
+\r
+                               swap_item->idx = mali_mem_swap_idx_alloc();\r
+\r
+                               if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {\r
+                                       MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));\r
+                                       kfree(swap_item);\r
+                                       goto error;\r
+                               }\r
+\r
+                               if (1 != _mali_page_node_get_ref_count(m_page)) {\r
+                                       change_pages_nr++;\r
+                               }\r
+\r
+                               if (_mali_mem_put_page_node(m_page)) {\r
+                                       mali_mem_swap_free_swap_item(swap_item);\r
+                                       goto error;\r
+                               }\r
+\r
+                               _mali_page_node_add_swap_item(m_page, swap_item);\r
+                       }\r
+               }\r
+               count++;\r
+       }\r
+       cow->change_pages_nr = change_pages_nr;\r
+\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);\r
+\r
+       /* zap the CPU mapping of the modified range, and re-create it here if needed */\r
+       if (NULL != alloc->cpu_mapping.vma) {\r
+               MALI_DEBUG_ASSERT(0 != alloc->backend_handle);\r
+               MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);\r
+               MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);\r
+\r
+               if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {\r
+                       zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);\r
+\r
+                       ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);\r
+\r
+                       if (unlikely(ret != _MALI_OSK_ERR_OK)) {\r
+                               MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: CPU mapping failed!\n"));\r
+                               ret = _MALI_OSK_ERR_FAULT;\r
+                       }\r
+               } else {\r
+                       /* set to force page faults on the swappable COWed range */\r
+                       alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;\r
+                       alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;\r
+\r
+                       zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);\r
+                       /* clear the flags again so the swappable memory is unmapped in terms of struct page rather than raw page frames */\r
+                       alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;\r
+                       alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;\r
+               }\r
+       }\r
+\r
+error:\r
+       mutex_unlock(&backend->mutex);\r
+       return ret;\r
+\r
+}\r
+\r
+\r
+/**\r
+* Allocate pages for a COW backend\r
+* @target_bk - target allocation's backend (the allocation to COW from)\r
+* @target_offset - offset into the target allocation to COW (4K aligned; allows COW of memory allocated from a memory bank)\r
+* @target_size - size of the target allocation to COW (in bytes)\r
+* @backend - COW backend\r
+* @range_start - offset of the modified range (4K aligned)\r
+* @range_size - size of the modified range (in bytes)\r
+*/\r
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,\r
+                                      u32 target_offset,\r
+                                      u32 target_size,\r
+                                      mali_mem_backend *backend,\r
+                                      u32 range_start,\r
+                                      u32 range_size)\r
+{\r
+       struct mali_session_data *session = backend->mali_allocation->session;\r
+\r
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       /* size & offset must be a multiple of the system page size */\r
+       if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       /* check backend type */\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
+\r
+       switch (target_bk->type) {\r
+       case MALI_MEM_OS:\r
+       case MALI_MEM_BLOCK:\r
+               return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
+               break;\r
+       case MALI_MEM_COW:\r
+               if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {\r
+                       return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
+               } else {\r
+                       return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
+               }\r
+               break;\r
+       case MALI_MEM_SWAP:\r
+               return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
+               break;\r
+       case MALI_MEM_EXTERNAL:\r
+               /* not supported yet */\r
+               MALI_DEBUG_PRINT_ERROR(("External physical memory not supported!\n"));\r
+               return _MALI_OSK_ERR_UNSUPPORTED;\r
+               break;\r
+       case MALI_MEM_DMA_BUF:\r
+               /* not supported yet */\r
+               MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported!\n"));\r
+               return _MALI_OSK_ERR_UNSUPPORTED;\r
+               break;\r
+       case MALI_MEM_UMP:\r
+               /* not supported yet */\r
+               MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported!\n"));\r
+               return _MALI_OSK_ERR_UNSUPPORTED;\r
+               break;\r
+       default:\r
+               /* unknown memory type */\r
+               MALI_DEBUG_PRINT_ERROR(("Invalid memory type, not supported!\n"));\r
+               return _MALI_OSK_ERR_UNSUPPORTED;\r
+               break;\r
+       }\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
+/**\r
+* Map COW backend memory to Mali\r
+* Supports OS and BLOCK mali_page_node types\r
+*/\r
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)\r
+{\r
+       mali_mem_allocation *cow_alloc;\r
+       struct mali_page_node *m_page;\r
+       struct mali_session_data *session;\r
+       struct mali_page_directory *pagedir;\r
+       u32 virt, start;\r
+\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+\r
+       cow_alloc = mem_bkend->mali_allocation;\r
+       MALI_DEBUG_ASSERT_POINTER(cow_alloc);\r
+\r
+       virt = cow_alloc->mali_vma_node.vm_node.start;\r
+       start = virt;\r
+\r
+       session = cow_alloc->session;\r
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
+       pagedir = session->page_directory;\r
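+       /* virt tracks the GPU virtual address of each page in the COW list;\r
+        * only pages inside [range_start, range_start + range_size) are\r
+        * written into the page directory. */\r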
+       list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {\r
+               if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {\r
+                       dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);\r
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)\r
+                       MALI_DEBUG_ASSERT(0 == (phys >> 32));\r
+#endif\r
+                       mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,\r
+                                               MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);\r
+               }\r
+               virt += MALI_MMU_PAGE_SIZE;\r
+       }\r
+       return 0;\r
+}\r
+\r
+/**\r
+* Map a COW backend to the CPU\r
+* Supports OS and BLOCK memory\r
+*/\r
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)\r
+{\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       struct mali_page_node *m_page;\r
+       int ret;\r
+       unsigned long addr = vma->vm_start;\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
+\r
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               /* We should use vm_insert_page, but it does a dcache\r
+                * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.\r
+               ret = vm_insert_page(vma, addr, page);\r
+               */\r
+               ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));\r
+\r
+               if (unlikely(0 != ret)) {\r
+                       return ret;\r
+               }\r
+               addr += _MALI_OSK_MALI_PAGE_SIZE;\r
+       }\r
+\r
+       return 0;\r
+}\r
+\r
+/**\r
+* Map some pages of a COW backend into a CPU vma at @vaddr\r
+* @mem_bkend - COW backend\r
+* @vma - target vma\r
+* @vaddr - first CPU virtual address to map at\r
+* @num - maximum number of pages to map\r
+*/\r
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,\r
+               struct vm_area_struct *vma,\r
+               unsigned long vaddr,\r
+               int num)\r
+{\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       struct mali_page_node *m_page;\r
+       int ret;\r
+       int offset;\r
+       int count = 0;\r
+       unsigned long vstart = vma->vm_start;\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
+       MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);\r
+       MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);\r
+       offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;\r
+\r
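+       /* offset is the page index of vaddr within the vma; map at most @num\r
+        * pages starting there */\r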
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               if ((count >= offset) && (count < offset + num)) {\r
+                       ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));\r
+\r
+                       if (unlikely(0 != ret)) {\r
+                               if (count == offset) {\r
+                                       return _MALI_OSK_ERR_FAULT;\r
+                               } else {\r
+                                       /* a page outside the zapped range is already mapped (-EBUSY); that is fine */\r
+                                       return _MALI_OSK_ERR_OK;\r
+                               }\r
+                       }\r
+                       vaddr += _MALI_OSK_MALI_PAGE_SIZE;\r
+               }\r
+               count++;\r
+       }\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+/**\r
+* Release COW backend memory\r
+* Pages are freed directly (put_page / unref), not returned to the page pool\r
+*/\r
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)\r
+{\r
+       mali_mem_allocation *alloc;\r
+       struct mali_session_data *session;\r
+       u32 free_pages_nr = 0;\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+       alloc = mem_bkend->mali_allocation;\r
+       MALI_DEBUG_ASSERT_POINTER(alloc);\r
+\r
+       session = alloc->session;\r
+       MALI_DEBUG_ASSERT_POINTER(session);\r
+\r
+       if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {\r
+               /* Unmap the memory from the mali virtual address space. */\r
+               if (MALI_TRUE == is_mali_mapped)\r
+                       mali_mem_os_mali_unmap(alloc);\r
+               /* free cow backend list*/\r
+               _mali_osk_mutex_wait(session->cow_lock);\r
+               free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);\r
+               _mali_osk_mutex_signal(session->cow_lock);\r
+\r
+               free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);\r
+\r
+               MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));\r
+       } else {\r
+               free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);\r
+       }\r
+\r
+\r
+       MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,\r
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));\r
+\r
+       mem_bkend->cow_mem.count = 0;\r
+       return free_pages_nr;\r
+}\r
+\r
+\r
+/* The destination node can be an OS node or a swap node. */\r
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)\r
+{\r
+       void *dst, *src;\r
+       struct page *dst_page;\r
+       dma_addr_t dma_addr;\r
+\r
+       MALI_DEBUG_ASSERT(src_node != NULL);\r
+       MALI_DEBUG_ASSERT(dst_node != NULL);\r
+       MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS\r
+                         || dst_node->type == MALI_PAGE_NODE_SWAP);\r
+\r
+       if (dst_node->type == MALI_PAGE_NODE_OS) {\r
+               dst_page = dst_node->page;\r
+       } else {\r
+               dst_page = dst_node->swap_it->page;\r
+       }\r
+\r
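+       /* hand the destination page back to the CPU before writing it */\r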
+       dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),\r
+                      _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);\r
+\r
+       /* map it and copy the content */\r
+       dst = kmap_atomic(dst_page);\r
+\r
+       if (src_node->type == MALI_PAGE_NODE_OS ||\r
+           src_node->type == MALI_PAGE_NODE_SWAP) {\r
+               struct page *src_page;\r
+\r
+               if (src_node->type == MALI_PAGE_NODE_OS) {\r
+                       src_page = src_node->page;\r
+               } else {\r
+                       src_page = src_node->swap_it->page;\r
+               }\r
+\r
+               /* Clean and invalidate the cache.\r
+                * On ARM, speculative reads through the kernel linear mapping may\r
+                * pull stale data into the L1 cache; unmapping with DMA_BIDIRECTIONAL\r
+                * invalidates the L1 cache so the following read sees the latest data.\r
+                */\r
+               dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),\r
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);\r
+\r
+               src = kmap_atomic(src_page);\r
+               memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);\r
+               kunmap_atomic(src);\r
+               dma_addr = dma_map_page(&mali_platform_device->dev, src_page,\r
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);\r
+\r
+               if (src_node->type == MALI_PAGE_NODE_SWAP) {\r
+                       src_node->swap_it->dma_addr = dma_addr;\r
+               }\r
+       } else if (src_node->type == MALI_PAGE_NODE_BLOCK) {\r
+               /* use ioremap to map the BLOCK memory source */\r
+               src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);\r
+               memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);\r
+               iounmap(src);\r
+       }\r
+       kunmap_atomic(dst);\r
+       dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,\r
+                               0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);\r
+\r
+       if (dst_node->type == MALI_PAGE_NODE_SWAP) {\r
+               dst_node->swap_it->dma_addr = dma_addr;\r
+       }\r
+}\r
+\r
+\r
+/*\r
+* Allocate a page on demand when the CPU accesses it.\r
+* This is used in the page fault handler.\r
+*/\r
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)\r
+{\r
+       struct page *new_page = NULL;\r
+       struct mali_page_node *new_node = NULL;\r
+       int i = 0;\r
+       struct mali_page_node *m_page, *found_node = NULL;\r
+       struct  mali_session_data *session = NULL;\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+       MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);\r
+       MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));\r
+\r
+       /* allocate new page here */\r
+       new_page = mali_mem_cow_alloc_page();\r
+       if (!new_page)\r
+               return _MALI_OSK_ERR_NOMEM;\r
+\r
+       new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);\r
+       if (!new_node) {\r
+               __free_page(new_page);\r
+               return _MALI_OSK_ERR_NOMEM;\r
+       }\r
+\r
+       /* find the page in the backend */\r
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               if (i == offset_page) {\r
+                       found_node = m_page;\r
+                       break;\r
+               }\r
+               i++;\r
+       }\r
+       MALI_DEBUG_ASSERT(found_node);\r
+       if (NULL == found_node) {\r
+               __free_page(new_page);\r
+               kfree(new_node);\r
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;\r
+       }\r
+\r
+       _mali_page_node_add_page(new_node, new_page);\r
+\r
+       /* Copy the src page's content to new page */\r
+       _mali_mem_cow_copy_page(found_node, new_node);\r
+\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);\r
+       session = mem_bkend->mali_allocation->session;\r
+       MALI_DEBUG_ASSERT_POINTER(session);\r
+       if (1 != _mali_page_node_get_ref_count(found_node)) {\r
+               atomic_add(1, &session->mali_mem_allocated_pages);\r
+               if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {\r
+                       session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;\r
+               }\r
+               mem_bkend->cow_mem.change_pages_nr++;\r
+       }\r
+\r
+       _mali_osk_mutex_wait(session->cow_lock);\r
+       if (_mali_mem_put_page_node(found_node)) {\r
+               __free_page(new_page);\r
+               kfree(new_node);\r
+               _mali_osk_mutex_signal(session->cow_lock);\r
+               return _MALI_OSK_ERR_NOMEM;\r
+       }\r
+       _mali_osk_mutex_signal(session->cow_lock);\r
+\r
+       list_replace(&found_node->list, &new_node->list);\r
+\r
+       kfree(found_node);\r
+\r
+       /* map to the GPU side */\r
+       _mali_osk_mutex_wait(session->memory_lock);\r
+       mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);\r
+       _mali_osk_mutex_signal(session->memory_lock);\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
diff --git a/utgard/r8p0/linux/mali_memory_cow.h b/utgard/r8p0/linux/mali_memory_cow.h
new file mode 100644 (file)
index 0000000..9b9e834
--- /dev/null
@@ -0,0 +1,48 @@
+/*\r
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */\r
+\r
+#ifndef __MALI_MEMORY_COW_H__\r
+#define __MALI_MEMORY_COW_H__\r
+\r
+#include "mali_osk.h"\r
+#include "mali_session.h"\r
+#include "mali_memory_types.h"\r
+\r
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);\r
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,\r
+               struct vm_area_struct *vma,\r
+               unsigned long vaddr,\r
+               int num);\r
+\r
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,\r
+                                      u32 target_offset,\r
+                                      u32 target_size,\r
+                                      mali_mem_backend *backend,\r
+                                      u32 range_start,\r
+                                      u32 range_size);\r
+\r
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size);\r
+\r
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,\r
+               u32 target_offset,\r
+               u32 target_size,\r
+               mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size);\r
+\r
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node);\r
+\r
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size);\r
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);\r
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page);\r
+#endif\r
+\r
diff --git a/utgard/r8p0/linux/mali_memory_defer_bind.c b/utgard/r8p0/linux/mali_memory_defer_bind.c
new file mode 100644 (file)
index 0000000..a13eea0
--- /dev/null
@@ -0,0 +1,262 @@
+/*\r
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */\r
+#include <linux/mm.h>\r
+#include <linux/list.h>\r
+#include <linux/mm_types.h>\r
+#include <linux/fs.h>\r
+#include <linux/dma-mapping.h>\r
+#include <linux/highmem.h>\r
+#include <asm/cacheflush.h>\r
+#include <linux/sched.h>\r
+#ifdef CONFIG_ARM\r
+#include <asm/outercache.h>\r
+#endif\r
+#include <asm/dma-mapping.h>\r
+\r
+#include "mali_memory.h"\r
+#include "mali_kernel_common.h"\r
+#include "mali_uk_types.h"\r
+#include "mali_osk.h"\r
+#include "mali_kernel_linux.h"\r
+#include "mali_memory_defer_bind.h"\r
+#include "mali_executor.h"\r
+#include "mali_osk.h"\r
+#include "mali_scheduler.h"\r
+#include "mali_gp_job.h"\r
+\r
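+/*\r
+ * Deferred binding of GP varying memory: physical pages are allocated up\r
+ * front into a per-job free-page block, and bound to each allocation's\r
+ * backend when the job is started (see mali_mem_defer_bind()).\r
+ */\r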
+mali_defer_bind_manager *mali_dmem_man = NULL;\r
+\r
+static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)\r
+{\r
+       return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;\r
+}\r
+\r
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)\r
+{\r
+       mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager));\r
+       if (!mali_dmem_man)\r
+               return _MALI_OSK_ERR_NOMEM;\r
+\r
+       atomic_set(&mali_dmem_man->num_used_pages, 0);\r
+       atomic_set(&mali_dmem_man->num_dmem, 0);\r
+\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
+void mali_mem_defer_bind_manager_destory(void)\r
+{\r
+       if (mali_dmem_man) {\r
+               MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem));\r
+               _mali_osk_free(mali_dmem_man);\r
+       }\r
+       mali_dmem_man = NULL;\r
+}\r
+\r
+\r
+/* allocate pages from OS memory */\r
+_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock)\r
+{\r
+       int retval = 0;\r
+       u32 num_pages = require;\r
+       mali_mem_os_mem os_mem;\r
+\r
+       retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE);\r
+\r
+       /* add to free pages list */\r
+       if (0 == retval) {\r
+               MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages));\r
+               list_splice(&os_mem.pages, &dblock->free_pages);\r
+               atomic_add(os_mem.count, &dblock->num_free_pages);\r
+               atomic_add(os_mem.count, &session->mali_mem_allocated_pages);\r
+               if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {\r
+                       session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;\r
+               }\r
+               return _MALI_OSK_ERR_OK;\r
+       } else\r
+               return _MALI_OSK_ERR_FAULT;\r
+}\r
+\r
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock)\r
+{\r
+       u32 require_page;\r
+\r
+       if (!next_gp_job)\r
+               return _MALI_OSK_ERR_FAULT;\r
+\r
+       require_page = mali_dmem_get_gp_varying_size(next_gp_job);\r
+\r
+       MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n",\r
+                            require_page));\r
+       /* allocate more pages from OS */\r
+       if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) {\r
+               MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!"));\r
+               return _MALI_OSK_ERR_NOMEM;\r
+       }\r
+\r
+       next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED;\r
+\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
+/* prepare the allocations before the deferred bind */\r
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize)\r
+{\r
+       mali_mem_backend *mem_bkend = NULL;\r
+       struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));\r
+       if (NULL == bk_list)\r
+               return _MALI_OSK_ERR_FAULT;\r
+\r
+       INIT_LIST_HEAD(&bk_list->node);\r
+       /* Get backend memory */\r
+       mutex_lock(&mali_idr_mutex);\r
+       if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {\r
+               MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));\r
+               mutex_unlock(&mali_idr_mutex);\r
+               _mali_osk_free(bk_list);\r
+               return _MALI_OSK_ERR_FAULT;\r
+       }\r
+       mutex_unlock(&mali_idr_mutex);\r
+\r
+       /* If the mem backend has already been bound, no need to bind again.*/\r
+       if (mem_bkend->os_mem.count > 0) {\r
+               _mali_osk_free(bk_list);\r
+               return _MALI_OSK_ERR_OK;\r
+       }\r
+\r
+       MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));\r
+\r
+       INIT_LIST_HEAD(&mem_bkend->os_mem.pages);\r
+\r
+       bk_list->bkend = mem_bkend;\r
+       bk_list->vaddr = alloc->mali_vma_node.vm_node.start;\r
+       bk_list->session = alloc->session;\r
+       bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;\r
+       *required_varying_memsize +=  mem_bkend->size;\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);\r
+\r
+       /* add to the job's to-do list */\r
+       list_add(&bk_list->node, list);\r
+\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
+\r
+/* Bind physical memory to an allocation.\r
+ * This function is called in the IRQ handler. */\r
+static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node,\r
+               struct list_head *pages)\r
+{\r
+       struct mali_session_data *session = bk_node->session;\r
+       mali_mem_backend *mem_bkend = bk_node->bkend;\r
+       MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session));\r
+\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);\r
+       list_splice(pages, &mem_bkend->os_mem.pages);\r
+       mem_bkend->os_mem.count = bk_node->page_num;\r
+\r
+       if (mem_bkend->type == MALI_MEM_OS) {\r
+               mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0,\r
+                                    mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT);\r
+       }\r
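+       /* make the newly bound page list visible before publishing the flags below */\r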
+       smp_wmb();\r
+       bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED;\r
+       mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED;\r
+       mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED;\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
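+/* Move @count pages from the block's free-page pool onto @pages.\r
+ * Returns @pages on success, or NULL if the pool holds fewer than @count pages. */\r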
+static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock)\r
+{\r
+       int i = 0;\r
+       struct mali_page_node *m_page, *m_tmp;\r
+\r
+       if (atomic_read(&dblock->num_free_pages) < count) {\r
+               return NULL;\r
+       } else {\r
+               list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) {\r
+                       if (i < count) {\r
+                               list_move_tail(&m_page->list, pages);\r
+                       } else {\r
+                               break;\r
+                       }\r
+                       i++;\r
+               }\r
+               MALI_DEBUG_ASSERT(i == count);\r
+               atomic_sub(count, &dblock->num_free_pages);\r
+               return pages;\r
+       }\r
+}\r
+\r
+\r
+/* Called from the job start IOCTL to bind physical memory for each allocation.\r
+ * @gp - GP job whose vary_todo backend list is bound\r
+ * @dmem_block - block of pre-allocated free pages to bind from\r
+ */\r
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp,\r
+                                       struct mali_defer_mem_block *dmem_block)\r
+{\r
+       struct mali_defer_mem *dmem = NULL;\r
+       struct mali_backend_bind_list *bkn, *bkn_tmp;\r
+       LIST_HEAD(pages);\r
+\r
+       if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) {\r
+               MALI_DEBUG_PRINT_ERROR(("#BIND:  The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n"));\r
+               return _MALI_OSK_ERR_FAULT;\r
+       }\r
+\r
+       MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp));\r
+       dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));\r
+       if (dmem) {\r
+               INIT_LIST_HEAD(&dmem->node);\r
+               gp->dmem = dmem;\r
+       } else {\r
+               return _MALI_OSK_ERR_NOMEM;\r
+       }\r
+\r
+       atomic_add(1, &mali_dmem_man->num_dmem);\r
+       /* for each bk_list backend, do bind */\r
+       list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) {\r
+               INIT_LIST_HEAD(&pages);\r
+               if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) {\r
+                       list_del(&bkn->node);\r
+                       mali_mem_defer_bind_allocation(bkn, &pages);\r
+                       _mali_osk_free(bkn);\r
+               } else {\r
+                       /* should not happen: the pool was sized from required_varying_memsize */\r
+                       MALI_DEBUG_PRINT_ERROR(("#BIND: not enough memory when binding!\n"));\r
+                       _mali_osk_free(gp->dmem);\r
+                       return _MALI_OSK_ERR_NOMEM;\r
+               }\r
+       }\r
+\r
+       if (!list_empty(&gp->vary_todo)) {\r
+               MALI_DEBUG_PRINT_ERROR(("#BIND:  The deferbind backend list isn't empty !!## \n"));\r
+               _mali_osk_free(gp->dmem);\r
+               return _MALI_OSK_ERR_FAULT;\r
+       }\r
+\r
+       dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;\r
+\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp)\r
+{\r
+       if (gp->dmem) {\r
+               atomic_dec(&mali_dmem_man->num_dmem);\r
+               _mali_osk_free(gp->dmem);\r
+       }\r
+}\r
+\r
diff --git a/utgard/r8p0/linux/mali_memory_defer_bind.h b/utgard/r8p0/linux/mali_memory_defer_bind.h
new file mode 100644 (file)
index 0000000..4cf6f16
--- /dev/null
@@ -0,0 +1,64 @@
+/*\r
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */\r
+#ifndef __MALI_MEMORY_DEFER_BIND_H_\r
+#define __MALI_MEMORY_DEFER_BIND_H_\r
+\r
+\r
+#include "mali_osk.h"\r
+#include "mali_session.h"\r
+\r
+#include <linux/list.h>\r
+#include <linux/mm.h>\r
+#include <linux/rbtree.h>\r
+#include <linux/spinlock.h>\r
+#include <linux/types.h>\r
+\r
+\r
+#include "mali_memory_types.h"\r
+#include "mali_memory_os_alloc.h"\r
+#include "mali_uk_types.h"\r
+\r
+struct mali_gp_job;\r
+\r
+typedef struct mali_defer_mem {\r
+       struct list_head node;   /* list node in the bind manager */\r
+       u32 flag;\r
+} mali_defer_mem;\r
+\r
+\r
+typedef struct mali_defer_mem_block {\r
+       struct list_head free_pages; /* page pool */\r
+       atomic_t num_free_pages;\r
+} mali_defer_mem_block;\r
+\r
+/* list of varying-memory backends waiting to be bound */\r
+typedef struct mali_backend_bind_list {\r
+       struct list_head node;\r
+       struct mali_mem_backend *bkend;\r
+       u32 vaddr;\r
+       u32 page_num;\r
+       struct mali_session_data *session;\r
+       u32 flag;\r
+} mali_backend_bind_lists;\r
+\r
+\r
+typedef struct mali_defer_bind_manager {\r
+       atomic_t num_used_pages;\r
+       atomic_t num_dmem;\r
+} mali_defer_bind_manager;\r
+\r
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void);\r
+void mali_mem_defer_bind_manager_destory(void);\r
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, struct mali_defer_mem_block *dmem_block);\r
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list,  u32 *required_varying_memsize);\r
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock);\r
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp);\r
+\r
+#endif\r
diff --git a/utgard/r8p0/linux/mali_memory_dma_buf.c b/utgard/r8p0/linux/mali_memory_dma_buf.c
new file mode 100755 (executable)
index 0000000..59107fa
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/fs.h>      /* file system operations */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_virtual.h"
+#include "mali_pp_job.h"
+
+/*
+ * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
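+ * Mapping is reference-counted: the attachment is mapped on the first
+ * reference and unmapped again in mali_dma_buf_unmap() when map_ref drops
+ * to zero.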
+ */
+static int mali_dma_buf_map(mali_mem_backend *mem_backend)
+{
+       mali_mem_allocation *alloc;
+       struct mali_dma_buf_attachment *mem;
+       struct  mali_session_data *session;
+       struct mali_page_directory *pagedir;
+       _mali_osk_errcode_t err;
+       struct scatterlist *sg;
+       u32 virt, flags, unmap_dma_size;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       mem = mem_backend->dma_buf.attachment;
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       unmap_dma_size = mem->buf->size;
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(mem->session == session);
+
+       virt = alloc->mali_vma_node.vm_node.start;
+       flags = alloc->flags;
+
+       mali_session_memory_lock(session);
+       mem->map_ref++;
+
+       MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+#if (!defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)) && (defined(CONFIG_MALI_DMA_BUF_LAZY_MAP))
+       if (MALI_FALSE == mem->is_mapped)
+#else
+       if (1 == mem->map_ref)
+#endif
+       {
+               /* First reference taken, so we need to map the dma buf */
+               MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+               mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+               if (IS_ERR_OR_NULL(mem->sgt)) {
+                       MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+                       mem->map_ref--;
+                       mali_session_memory_unlock(session);
+                       return -EFAULT;
+               }
+
+               err = mali_mem_mali_map_prepare(alloc);
+               if (_MALI_OSK_ERR_OK != err) {
+                       MALI_DEBUG_PRINT(1, ("Mapping of DMA memory failed\n"));
+                       mem->map_ref--;
+                       mali_session_memory_unlock(session);
+                       return -ENOMEM;
+               }
+
+               pagedir = mali_session_get_page_directory(session);
+               MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+               for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
+                       u32 size = sg_dma_len(sg);
+                       dma_addr_t phys = sg_dma_address(sg);
+
+                       unmap_dma_size -= size;
+                       /* sg must be page aligned. */
+                       MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+                       MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
+
+                       mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+                       virt += size;
+               }
+
+               if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+                       u32 guard_phys;
+                       MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+                       guard_phys = sg_dma_address(mem->sgt->sgl);
+                       mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+               }
+
+               mem->is_mapped = MALI_TRUE;
+
+               if (0 != unmap_dma_size) {
+                       MALI_DEBUG_PRINT_ERROR(("The dma buf size isn't equal to the total scatterlists' dma length.\n"));
+                       mali_session_memory_unlock(session);
+                       return -EFAULT;
+               }
+
+               /* Wake up any thread waiting for buffer to become mapped */
+               wake_up_all(&mem->wait_queue);
+
+               mali_session_memory_unlock(session);
+       } else {
+               MALI_DEBUG_ASSERT(mem->is_mapped);
+               mali_session_memory_unlock(session);
+       }
+
+       return 0;
+}
+
+static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem)
+{
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       MALI_DEBUG_ASSERT_POINTER(alloc->session);
+
+       mali_session_memory_lock(alloc->session);
+       mem->map_ref--;
+
+       MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+       if (0 == mem->map_ref) {
+               if (NULL != mem->sgt) {
+                       dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+                        mem->sgt = NULL;
+               }
+               if (MALI_TRUE == mem->is_mapped) {
+                       mali_mem_mali_map_free(alloc->session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                                              alloc->flags);
+               }
+               mem->is_mapped = MALI_FALSE;
+       }
+
+       /* Wake up any thread waiting for buffer to become unmapped */
+       wake_up_all(&mem->wait_queue);
+
+       mali_session_memory_unlock(alloc->session);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+       struct mali_dma_buf_attachment *mem;
+       _mali_osk_errcode_t err;
+       int i;
+       int ret = 0;
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+       session = mali_pp_job_get_session(job);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       for (i = 0; i < num_memory_cookies; i++) {
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+               if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+                       continue;
+               }
+
+               /* Look up the backend memory */
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+               mem = mem_bkend->dma_buf.attachment;
+
+               MALI_DEBUG_ASSERT_POINTER(mem);
+               MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+
+               err = mali_dma_buf_map(mem_bkend);
+               if (0 != err) {
+                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for mali address %x\n", mali_addr));
+                       ret = -EFAULT;
+                       continue;
+               }
+       }
+       return ret;
+}
+
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+       struct mali_dma_buf_attachment *mem;
+       int i;
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+       session = mali_pp_job_get_session(job);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       for (i = 0; i < num_memory_cookies; i++) {
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+               if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+                       continue;
+               }
+
+               /* Look up the backend memory */
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+               mem = mem_bkend->dma_buf.attachment;
+
+               MALI_DEBUG_ASSERT_POINTER(mem);
+               MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+               mali_dma_buf_unmap(mem_bkend->mali_allocation, mem);
+       }
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+       _mali_uk_dma_buf_get_size_s args;
+       int fd;
+       struct dma_buf *buf;
+
+       /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+       if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
+               return -EFAULT;
+       }
+
+       /* look up the dma-buf from the fd and return its size */
+       fd = args.mem_fd;
+
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+               return PTR_RET(buf);
+       }
+
+       if (0 != put_user(buf->size, &user_arg->size)) {
+               dma_buf_put(buf);
+               return -EFAULT;
+       }
+
+       dma_buf_put(buf);
+
+       return 0;
+}
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               int fd, u32 flags)
+{
+       struct dma_buf *buf;
+       struct mali_dma_buf_attachment *dma_mem;
+       struct  mali_session_data *session = alloc->session;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       /* get dma buffer */
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Currently, only mapping of the full buffer is supported. */
+       if (alloc->psize != buf->size) {
+               goto failed_alloc_mem;
+       }
+
+       dma_mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+       if (NULL == dma_mem) {
+               goto failed_alloc_mem;
+       }
+
+       dma_mem->buf = buf;
+       dma_mem->session = session;
+#if (!defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)) && (defined(CONFIG_MALI_DMA_BUF_LAZY_MAP))
+       dma_mem->map_ref = 1;
+#else
+       dma_mem->map_ref = 0;
+#endif
+       init_waitqueue_head(&dma_mem->wait_queue);
+
+       dma_mem->attachment = dma_buf_attach(dma_mem->buf, &mali_platform_device->dev);
+       if (NULL == dma_mem->attachment) {
+               goto failed_dma_attach;
+       }
+
+       mem_backend->dma_buf.attachment = dma_mem;
+
+       alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       }
+
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       /* Map memory into session's Mali virtual address space. */
+       if (0 != mali_dma_buf_map(mem_backend)) {
+               goto Failed_dma_map;
+       }
+#endif
+
+       return _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+Failed_dma_map:
+       mali_dma_buf_unmap(alloc, dma_mem);
+#endif
+       /* Wait for buffer to become unmapped */
+       wait_event(dma_mem->wait_queue, !dma_mem->is_mapped);
+       MALI_DEBUG_ASSERT(!dma_mem->is_mapped);
+       dma_buf_detach(dma_mem->buf, dma_mem->attachment);
+failed_dma_attach:
+       _mali_osk_free(dma_mem);
+failed_alloc_mem:
+       dma_buf_put(buf);
+       return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend)
+{
+       struct mali_dma_buf_attachment *mem;
+       struct  mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type);
+
+       mem = mem_backend->dma_buf.attachment;
+
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+       MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation);
+       session = mem_backend->mali_allocation->session;
+
+#if (defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)) ||((!defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)) && (defined(CONFIG_MALI_DMA_BUF_LAZY_MAP)))
+       /* We took a mapping reference on attach, so we need to unmap on release */
+       mali_dma_buf_unmap(mem_backend->mali_allocation, mem);
+#endif
+       /* Wait for buffer to become unmapped */
+       wait_event(mem->wait_queue, !mem->is_mapped);
+       MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+       dma_buf_detach(mem->buf, mem->attachment);
+       dma_buf_put(mem->buf);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_session_memory_lock(session);
+       _mali_osk_free(mem);
+       mali_session_memory_unlock(session);
+}
diff --git a/utgard/r8p0/linux/mali_memory_dma_buf.h b/utgard/r8p0/linux/mali_memory_dma_buf.h
new file mode 100755 (executable)
index 0000000..37b6f6b
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_DMA_BUF_H__
+#define __MALI_MEMORY_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+struct mali_pp_job;
+
+struct mali_dma_buf_attachment;
+struct mali_dma_buf_attachment {
+       struct dma_buf *buf;
+       struct dma_buf_attachment *attachment;
+       struct sg_table *sgt;
+       struct mali_session_data *session;
+       int map_ref;
+       struct mutex map_lock;
+       mali_bool is_mapped;
+       wait_queue_head_t wait_queue;
+};
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend);
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               int fd, u32 flags);
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_DMA_BUF_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_external.c b/utgard/r8p0/linux/mali_memory_external.c
new file mode 100755 (executable)
index 0000000..c0097f6
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_uk_types.h"
+
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend)
+{
+       mali_mem_allocation *alloc;
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);
+
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               u32 phys_addr,
+               u32 flag)
+{
+       struct mali_session_data *session;
+       _mali_osk_errcode_t err;
+       u32 virt, phys, size;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       size = alloc->psize;
+       session = (struct mali_session_data *)(uintptr_t)alloc->session;
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+       /* check arguments */
+       /* NULL might be a valid Mali address */
+       if (!size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       /* size must be a multiple of the system page size */
+       if (size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+       /* Validate the mali physical range */
+       if (_MALI_OSK_ERR_OK != mali_mem_validation_check(phys_addr, size)) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (flag & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       }
+
+       mali_session_memory_lock(session);
+
+       virt = alloc->mali_vma_node.vm_node.start;
+       phys = phys_addr;
+
+       err = mali_mem_mali_map_prepare(alloc);
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_session_memory_unlock(session);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+       if (alloc->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+       }
+       MALI_DEBUG_PRINT(3,
+                        ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+                         phys_addr, (phys_addr + size - 1),
+                         virt));
+       mali_session_memory_unlock(session);
+
+       MALI_SUCCESS;
+}
+
diff --git a/utgard/r8p0/linux/mali_memory_external.h b/utgard/r8p0/linux/mali_memory_external.h
new file mode 100644 (file)
index 0000000..800ac2a
--- /dev/null
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_EXTERNAL_H__
+#define __MALI_MEMORY_EXTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               u32 phys_addr,
+               u32 flag);
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/utgard/r8p0/linux/mali_memory_manager.c b/utgard/r8p0/linux/mali_memory_manager.c
new file mode 100644 (file)
index 0000000..10d367e
--- /dev/null
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include <linux/platform_device.h>
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_secure.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_util.h"
+#include "mali_memory_external.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_ukk.h"
+#include "mali_memory_swap_alloc.h"
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(3, 14, 0)) || (LINUX_VERSION_CODE == KERNEL_VERSION(4, 9, 0))
+#define AML_MALI_DEBUG 1
+#endif
+
+/*
+* New memory system interface
+*/
+
+/* init idr for backend memory */
+struct idr mali_backend_idr;
+struct mutex mali_idr_mutex;
+
+extern void show_mem(unsigned int flags);
+
+/* init allocation manager */
+int mali_memory_manager_init(struct mali_allocation_manager *mgr)
+{
+       /* init Locks */
+       rwlock_init(&mgr->vm_lock);
+       mutex_init(&mgr->list_mutex);
+
+       /* init link */
+       INIT_LIST_HEAD(&mgr->head);
+
+       /* init RB tree */
+       mgr->allocation_mgr_rb = RB_ROOT;
+       mgr->mali_allocation_num = 0;
+       return 0;
+}
+
+/* Deinit the allocation manager.
+* Performs some consistency checks for debugging.
+*/
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
+{
+       /* check RB tree is empty */
+       MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
+       /* check allocation List */
+       MALI_DEBUG_ASSERT(list_empty(&mgr->head));
+}
+
+/* Prepare memory descriptor */
+static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
+{
+       mali_mem_allocation *mali_allocation;
+
+       /* Allocate memory */
+       mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
+       if (NULL == mali_allocation) {
+               MALI_PRINT_ERROR(("mali_mem_allocation_struct_create: failed to allocate descriptor\n"));
+               return NULL;
+       }
+
+       MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+       /* do init */
+       mali_allocation->flags = 0;
+       mali_allocation->session = session;
+
+       INIT_LIST_HEAD(&mali_allocation->list);
+       _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
+
+       /* Add to the session's allocation list. */
+       mutex_lock(&session->allocation_mgr.list_mutex);
+       list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
+       session->allocation_mgr.mali_allocation_num++;
+       mutex_unlock(&session->allocation_mgr.list_mutex);
+
+       return mali_allocation;
+}
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
+{
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT_POINTER(alloc->session);
+       mutex_lock(&alloc->session->allocation_mgr.list_mutex);
+       list_del(&alloc->list);
+       alloc->session->allocation_mgr.mali_allocation_num--;
+       mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
+
+       kfree(alloc);
+}
+
+int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
+{
+       mali_mem_backend *mem_backend = NULL;
+       s32 ret = -ENOSPC;
+       s32 index = -1;
+       *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
+       if (NULL == *backend) {
+               MALI_PRINT_ERROR(("mali_mem_backend_struct_create: failed to allocate backend descriptor\n"));
+               return -1;
+       }
+       mem_backend = *backend;
+       mem_backend->size = psize;
+       mutex_init(&mem_backend->mutex);
+       INIT_LIST_HEAD(&mem_backend->list);
+       mem_backend->using_count = 0;
+
+
+       /* link backend with id */
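+       /*
+        * Kernels older than 3.9 use the idr_pre_get()/idr_get_new_above()
+        * retry protocol (looping on -EAGAIN); 3.9 and later allocate and
+        * insert in one step with idr_alloc().
+        */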
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+again:
+       if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
+               kfree(mem_backend);
+               return -ENOMEM;
+       }
+       mutex_lock(&mali_idr_mutex);
+       ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
+       mutex_unlock(&mali_idr_mutex);
+
+       if (-ENOSPC == ret) {
+               kfree(mem_backend);
+               return -ENOSPC;
+       }
+       if (-EAGAIN == ret)
+               goto again;
+#else
+       mutex_lock(&mali_idr_mutex);
+       ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
+       mutex_unlock(&mali_idr_mutex);
+       index = ret;
+       if (ret < 0) {
+               MALI_PRINT_ERROR(("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
+               kfree(mem_backend);
+               return -ENOSPC;
+       }
+#endif
+       return index;
+}
+
+
+static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
+{
+       mali_mem_backend *mem_backend = *backend;
+
+       mutex_lock(&mali_idr_mutex);
+       idr_remove(&mali_backend_idr, backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       kfree(mem_backend);
+       *backend = NULL;
+}
+
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
+{
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
+       if (NULL == mali_vma_node)  {
+               MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
+               return NULL;
+       }
+       mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+       /* Look up the memory backend in the global backend idr. */
+       mutex_lock(&mali_idr_mutex);
+       mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(NULL != mem_bkend);
+       return mem_bkend;
+}
+
+static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       int retval = 0;
+       mali_mem_allocation *mali_allocation = NULL;
+       mali_mem_os_mem tmp_os_mem;
+       s32 change_page_count;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+       MALI_DEBUG_ASSERT(0 == physical_size %  MALI_MMU_PAGE_SIZE);
+
+       mali_allocation = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+       MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);
+
+       mutex_lock(&mem_backend->mutex);
+
+       /* Do resize*/
+       if (physical_size > mem_backend->size) {
+               u32 add_size = physical_size - mem_backend->size;
+
+               MALI_DEBUG_ASSERT(0 == add_size %  MALI_MMU_PAGE_SIZE);
+
+               /* Allocate new pages from os mem */
+               retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);
+
+               if (retval) {
+                       if (-ENOMEM == retval) {
+                               ret = _MALI_OSK_ERR_NOMEM;
+                       } else {
+                               ret = _MALI_OSK_ERR_FAULT;
+                       }
+                       MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
+                       goto failed_alloc_memory;
+               }
+
+               MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);
+
+               /* Resize the memory of the backend */
+               ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+
+               if (ret) {
+                       MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
+                       goto failed_resize_pages;
+               }
+
+               /* Resize cpu mapping */
+               if (NULL != mali_allocation->cpu_mapping.vma) {
+                       ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start  + mem_backend->size, add_size);
+                       if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+                               MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
+                               goto  failed_cpu_map;
+                       }
+               }
+
+               /* Resize mali mapping */
+               _mali_osk_mutex_wait(session->memory_lock);
+               ret = mali_mem_mali_map_resize(mali_allocation, physical_size);
+
+               if (ret) {
+                       MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
+                       goto failed_gpu_map;
+               }
+
+               ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
+                                          mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+               if (ret) {
+                       MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
+                       goto failed_gpu_map;
+               }
+
+               _mali_osk_mutex_signal(session->memory_lock);
+       } else {
+               u32 dec_size, page_count;
+               u32 vaddr = 0;
+               INIT_LIST_HEAD(&tmp_os_mem.pages);
+               tmp_os_mem.count = 0;
+
+               dec_size = mem_backend->size - physical_size;
+               MALI_DEBUG_ASSERT(0 == dec_size %  MALI_MMU_PAGE_SIZE);
+
+               page_count = dec_size / MALI_MMU_PAGE_SIZE;
+               vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;
+
+               /* Resize the memory of the backend */
+               ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);
+
+               if (ret) {
+                       MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
+                       goto failed_resize_pages;
+               }
+
+               /* Resize mali map */
+               _mali_osk_mutex_wait(session->memory_lock);
+               mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
+               _mali_osk_mutex_signal(session->memory_lock);
+
+               /* Zap cpu mapping */
+               if (0 != mali_allocation->cpu_mapping.addr) {
+                       MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
+                       zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
+               }
+
+               /* Free those extra pages */
+               mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+       }
+
+       /* Resize memory allocation and memory backend */
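+       /* change_page_count is signed: positive when the backend grew,
+        * negative when it shrank. */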
+       change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
+       mali_allocation->psize = physical_size;
+       mem_backend->size = physical_size;
+       mutex_unlock(&mem_backend->mutex);
+
+       if (change_page_count > 0) {
+               atomic_add(change_page_count, &session->mali_mem_allocated_pages);
+               if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+                       session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+               }
+
+       } else {
+               atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
+       }
+
+       return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+       _mali_osk_mutex_signal(session->memory_lock);
+failed_cpu_map:
+       if (physical_size > mem_backend->size) {
+               mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
+                                        (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
+       } else {
+               mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+       }
+failed_resize_pages:
+       if (0 != tmp_os_mem.count)
+               mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+failed_alloc_memory:
+
+       mutex_unlock(&mem_backend->mutex);
+       return ret;
+}
+
+
+/* Set GPU MMU properties */
+static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
+{
+       if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
+               *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+       } else {
+               *properties = MALI_MMU_FLAGS_DEFAULT;
+       }
+}
+
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
+{
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_allocation *mali_allocation = NULL;
+       u32 new_physical_size;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(0 == add_size %  MALI_MMU_PAGE_SIZE);
+
+       /* Get the memory backend that needs to be resized. */
+       mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+       if (NULL == mem_backend)  {
+               MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+               return ret;
+       }
+
+       mali_allocation = mem_backend->mali_allocation;
+
+       MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+       new_physical_size = add_size + mem_backend->size;
+
+       if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
+               return ret;
+
+       MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);
+
+       ret = mali_mem_resize(session, mem_backend, new_physical_size);
+
+       return ret;
+}
+
+/**
+* _mali_ukk_mem_allocate - allocate mali memory
+*/
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
+{
+       struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       int retval = 0;
+       mali_mem_allocation *mali_allocation = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+       MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
+
+       if (args->vsize < args->psize) {
+               MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: vsize %d  shouldn't be less than psize %d\n", args->vsize, args->psize));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       } else if ((args->vsize % _MALI_OSK_MALI_PAGE_SIZE) || (args->psize % _MALI_OSK_MALI_PAGE_SIZE)) {
+               MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: non page-aligned sizes not supported --> psize %d, vsize %d\n", args->psize, args->vsize));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       } else if ((args->vsize != args->psize) && ((args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) || (args->flags & _MALI_MEMORY_ALLOCATE_SECURE))) {
+               MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: resizing (vsize != psize) not supported for mem flags %d\n", args->flags));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       /* Check if the address is already allocated. */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
+
+       if (unlikely(mali_vma_node)) {
+               MALI_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+       /* Create the mali memory allocation. */
+
+       mali_allocation = mali_mem_allocation_struct_create(session);
+
+       if (mali_allocation == NULL) {
+               MALI_PRINT_ERROR((" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! %s, %d\n", args->gpu_vaddr, args->psize, __FILE__, __LINE__));
+               MALI_PRINT_ERROR(("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->psize;
+       mali_allocation->vsize = args->vsize;
+
+       /* Pick the backend type: MALI_MEM_SWAP for swappable allocations,
+        * MALI_MEM_OS (with the resize flag) when resizing must be supported,
+        * MALI_MEM_SECURE for secure allocations, MALI_MEM_BLOCK when
+        * dedicated memory is available, and MALI_MEM_OS otherwise.
+        */
+       if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
+               mali_allocation->type = MALI_MEM_SWAP;
+       } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
+               mali_allocation->type = MALI_MEM_OS;
+               mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
+       } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) {
+               mali_allocation->type = MALI_MEM_SECURE;
+       } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+               mali_allocation->type = MALI_MEM_BLOCK;
+       } else {
+               mali_allocation->type = MALI_MEM_OS;
+       }
+
+       /* Add the allocation node to the RB tree for indexing. */
+       mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = args->vsize;
+
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
+       if (mali_allocation->backend_handle < 0) {
+               ret = _MALI_OSK_ERR_NOMEM;
+               MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n",  mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+               MALI_PRINT_ERROR(("mali_allocation->backend_handle < 0! \n"));
+               goto failed_alloc_backend;
+       }
+
+
+       mem_backend->mali_allocation = mali_allocation;
+       mem_backend->type = mali_allocation->type;
+
+       mali_allocation->mali_mapping.addr = args->gpu_vaddr;
+
+       /* set gpu mmu property */
+       _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+       /* do prepare for MALI mapping */
+       if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+               _mali_osk_mutex_wait(session->memory_lock);
+
+               ret = mali_mem_mali_map_prepare(mali_allocation);
+               if (0 != ret) {
+                       _mali_osk_mutex_signal(session->memory_lock);
+                       MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n",  mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+                       MALI_PRINT_ERROR(("Aml-------%s, %d\n", __FILE__, __LINE__));
+                       goto failed_prepare_map;
+               }
+               _mali_osk_mutex_signal(session->memory_lock);
+       }
+
+       if (mali_allocation->psize == 0) {
+               mem_backend->os_mem.count = 0;
+               INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+               goto done;
+       }
+
+       if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
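+               /* Deferred bind: leave the backend without pages and mark it
+                * NOT_BINDED; physical pages are attached later by the
+                * defer-bind path rather than at allocation time. */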
+               mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
+               mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
+               /* init for defer bind backend*/
+               mem_backend->os_mem.count = 0;
+               INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+
+               goto done;
+       }
+
+       if (likely(mali_allocation->psize > 0)) {
+
+               if (MALI_MEM_SECURE == mem_backend->type) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+                       ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd);
+                       if (_MALI_OSK_ERR_OK != ret) {
+                               MALI_PRINT_ERROR(("Failed to attach dma buf for secure memory! \n"));
+                               goto failed_alloc_pages;
+                       }
+#else
+                       ret = _MALI_OSK_ERR_UNSUPPORTED;
+                       MALI_PRINT_ERROR(("DMA not supported for mali secure memory! \n"));
+                       goto failed_alloc_pages;
+#endif
+               } else {
+
+                       /* Allocate physical memory. */
+                       if (mem_backend->type == MALI_MEM_OS) {
+                               retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+                       } else if (mem_backend->type == MALI_MEM_BLOCK) {
+                               /* Try to allocate from BLOCK memory first, then fall back to OS memory on failure. */
+                               if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
+                                       retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+                                       mem_backend->type = MALI_MEM_OS;
+                                       mali_allocation->type = MALI_MEM_OS;
+                               }
+                       } else if (MALI_MEM_SWAP == mem_backend->type) {
+                               retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
+                       } else {
+                               /* unsupported backend type */
+                               MALI_DEBUG_ASSERT(0);
+                       }
+
+                       if (retval) {
+                               ret = _MALI_OSK_ERR_NOMEM;
+                               MALI_PRINT_ERROR(("mali_allocation->psize = %d mali_allocation->vsize = %d mali_allocation->type = %d \n",  mali_allocation->psize, mali_allocation->vsize, mali_allocation->type));
+                               MALI_PRINT_ERROR((" can't allocate enough pages! \n"));
+                               goto failed_alloc_pages;
+                       }
+               }
+       }
+
+       /* Map to the GPU side. */
+       if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               /* Map on Mali */
+
+               if (mem_backend->type == MALI_MEM_OS) {
+                       ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
+                                                  mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+
+               } else if (mem_backend->type == MALI_MEM_BLOCK) {
+                       mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
+                                               mali_allocation->mali_mapping.properties);
+               } else if (mem_backend->type == MALI_MEM_SWAP) {
+                       ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
+                                                    mali_allocation->mali_mapping.properties);
+               } else if (mem_backend->type == MALI_MEM_SECURE) {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+                       ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties);
+#endif
+               } else { /* unsupported type */
+                       MALI_DEBUG_ASSERT(0);
+               }
+
+               _mali_osk_mutex_signal(session->memory_lock);
+       }
+done:
+       if (MALI_MEM_OS == mem_backend->type) {
+               atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
+       } else if (MALI_MEM_BLOCK == mem_backend->type) {
+               atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
+       } else if (MALI_MEM_SECURE == mem_backend->type) {
+               atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages);
+       } else {
+               MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
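+               /* Swap-backed memory is additionally tracked in the
+                * per-type counter array used for usage reporting. */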
+               atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
+               atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
+       }
+
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+       return _MALI_OSK_ERR_OK;
+
+failed_alloc_pages:
+       mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
+failed_prepare_map:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       return ret;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       u32 vaddr = args->gpu_vaddr;
+       mali_mem_allocation *mali_alloc = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+
+       /* find mali allocation structure by vaddress*/
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
+       if (NULL == mali_vma_node) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+
+       if (mali_alloc) {
+               if ((MALI_MEM_UMP == mali_alloc->type) || (MALI_MEM_DMA_BUF == mali_alloc->type)
+                   || (MALI_MEM_EXTERNAL == mali_alloc->type)) {
+                       MALI_PRINT_ERROR(("_mali_ukk_mem_free: not supported for memory type %d\n",  mali_alloc->type));
+                       return _MALI_OSK_ERR_UNSUPPORTED;
+               }
+               /* check ref_count */
+               args->free_pages_nr = mali_allocation_unref(&mali_alloc);
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Function _mali_ukk_mem_bind -- bind an external memory region to a new GPU address
+* It will allocate a new mem allocation and bind the external memory to it.
+* Supported backend types are:
+* _MALI_MEMORY_BIND_BACKEND_UMP
+* _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+* _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+* CPU access is not supported yet
+*/
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_allocation *mali_allocation = NULL;
+       MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));
+
+       /* Allocate the mali allocation. */
+       mali_allocation = mali_mem_allocation_struct_create(session);
+
+       if (mali_allocation == NULL) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->size;
+       mali_allocation->vsize = args->size;
+       mali_allocation->mali_mapping.addr = args->vaddr;
+
+       /* add allocation node to RB tree for index  */
+       mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = args->size;
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+       /* allocate backend*/
+       if (mali_allocation->psize > 0) {
+               mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+               if (mali_allocation->backend_handle < 0) {
+                       goto Failed_alloc_backend;
+               }
+
+       } else {
+               goto Failed_alloc_backend;
+       }
+
+       mem_backend->size = mali_allocation->psize;
+       mem_backend->mali_allocation = mali_allocation;
+
+       switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
+       case  _MALI_MEMORY_BIND_BACKEND_UMP:
+#if defined(CONFIG_MALI400_UMP)
+               mali_allocation->type = MALI_MEM_UMP;
+               mem_backend->type = MALI_MEM_UMP;
+               ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
+                                           args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
+                       goto  Failed_bind_backend;
+               }
+#else
+               MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+               goto Failed_bind_backend;
+#endif
+               break;
+       case  _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               mali_allocation->type = MALI_MEM_DMA_BUF;
+               mem_backend->type = MALI_MEM_DMA_BUF;
+               ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
+                                           args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
+                       goto Failed_bind_backend;
+               }
+#else
+               MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+               goto Failed_bind_backend;
+#endif
+               break;
+       case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
+               /* not allowed */
+               MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
+               goto Failed_bind_backend;
+               break;
+
+       case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
+               mali_allocation->type = MALI_MEM_EXTERNAL;
+               mem_backend->type = MALI_MEM_EXTERNAL;
+               ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
+                                           args->mem_union.bind_ext_memory.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
+                       goto Failed_bind_backend;
+               }
+               break;
+
+       case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
+               /* not allowed */
+               MALI_DEBUG_PRINT_ERROR(("External COW memory type not supported !\n"));
+               goto Failed_bind_backend;
+               break;
+
+       default:
+               MALI_DEBUG_PRINT_ERROR(("Invalid memory type, not supported !\n"));
+               goto Failed_bind_backend;
+               break;
+       }
+       MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
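+       /* Bound UMP/dma-buf/external memory is tracked per backend type
+        * only; it is not added to mali_mem_allocated_pages. */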
+       atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
+       return _MALI_OSK_ERR_OK;
+
+Failed_bind_backend:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+
+Failed_alloc_backend:
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
+       return ret;
+}
+
+
+/*
+* Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address
+* This function unbinds the backend memory and frees the allocation;
+* there is no ref_count for this type of memory.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_allocation *mali_allocation = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+       u32 mali_addr = args->vaddr;
+       MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
+
+       /* find the allocation by vaddr */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+       if (likely(mali_vma_node)) {
+               MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+               mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+       } else {
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       if (NULL != mali_allocation) {
+
+               if ((MALI_MEM_UMP != mali_allocation->type) && (MALI_MEM_DMA_BUF != mali_allocation->type)
+                   && (MALI_MEM_EXTERNAL != mali_allocation->type)) {
+                       MALI_PRINT_ERROR(("_mali_ukk_mem_unbind not supported for memory type %d\n",  mali_allocation->type));
+                       return _MALI_OSK_ERR_UNSUPPORTED;
+               }
+
+               /* check ref_count */
+               mali_allocation_unref(&mali_allocation);
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/*
+* Function _mali_ukk_mem_cow -- COW for an allocation
+* This function allocates new pages for a range (range_start, range_start + range_size)
+* of the target allocation, keeps the pages outside that range from the target
+* allocation, and maps the result to a GPU vaddr.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_backend *target_backend = NULL;
+       mali_mem_backend *mem_backend = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_allocation = NULL;
+
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       /* Get the target backend for cow */
+       target_backend = mali_mem_backend_struct_search(session, args->target_handle);
+
+       if (NULL == target_backend || 0 == target_backend->size) {
+               MALI_DEBUG_ASSERT_POINTER(target_backend);
+               MALI_DEBUG_ASSERT(0 != target_backend->size);
+               return ret;
+       }
+
+       /* COW does not support resized memory */
+       MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));
+
+       /* Check if the new mali address is allocated */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
+
+       if (unlikely(mali_vma_node)) {
+               MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
+               return ret;
+       }
+
+       /* Create a new allocation for COW */
+       mali_allocation = mali_mem_allocation_struct_create(session);
+       if (mali_allocation == NULL) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->target_size;
+       mali_allocation->vsize = args->target_size;
+       mali_allocation->type = MALI_MEM_COW;
+
+       /* Add the allocation node to the RB tree for indexing */
+       mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+       /* create new backend for COW memory */
+       mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+       if (mali_allocation->backend_handle < 0) {
+               ret = _MALI_OSK_ERR_NOMEM;
+               MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+               goto failed_alloc_backend;
+       }
+       mem_backend->mali_allocation = mali_allocation;
+       mem_backend->type = mali_allocation->type;
+
+       if (target_backend->type == MALI_MEM_SWAP ||
+           (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
+               mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
+               /**
+                *     CoWed swap backends cannot be mapped as non-linear VMAs:
+                * when a vma is flagged VM_NONLINEAR the kernel takes over
+                * vma->vm_private_data, which the mali driver already uses to
+                * store the mali_allocation pointer, so the two uses conflict.
+                *     To work around this, about 64MB of index space starting
+                * at 0 is reserved: no real page index is ever assigned in the
+                * range 0..(64MB >> PAGE_SHIFT_NUM), and every CoWed swap
+                * backend gets start_idx = 0. These backends are mapped as
+                * linear and added to the priority tree of the global swap
+                * file, but their vmas can never be found through a normal
+                * page->index lookup, so their pages can never be swapped out.
+                */
+               mem_backend->start_idx = 0;
+       }
+
+       /* Bump the target backend's COW count, allocate new pages from OS
+       * memory for the modified range, and keep (and take a reference on)
+       * the pages that are outside the modified range.
+       */
+       MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x;  cow_addr: 0x%x,  size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
+                            mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
+
+       ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
+       if (_MALI_OSK_ERR_OK != ret) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
+               goto failed_do_cow;
+       }
+
+       /* Map to the GPU side. */
+       mali_allocation->mali_mapping.addr = args->vaddr;
+       /* set gpu mmu property */
+       _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+
+       _mali_osk_mutex_wait(session->memory_lock);
+       /* Map on Mali */
+       ret = mali_mem_mali_map_prepare(mali_allocation);
+       if (0 != ret) {
+               MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
+               goto failed_gpu_map;
+       }
+
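+       /* Swap-CoWed backends are not mapped onto the GPU here; their GPU
+        * mapping is handled by the swap path instead. */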
+       if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+               mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
+       }
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       mutex_lock(&target_backend->mutex);
+       target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
+       mutex_unlock(&target_backend->mutex);
+
+       atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+       return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+       _mali_osk_mutex_signal(session->memory_lock);
+       mali_mem_cow_release(mem_backend, MALI_FALSE);
+       mem_backend->cow_mem.count = 0;
+failed_do_cow:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_backend *mem_backend = NULL;
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
+       /* Get the backend that needs to be modified. */
+       mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+       if (NULL == mem_backend || 0 == mem_backend->size) {
+               MALI_DEBUG_ASSERT_POINTER(mem_backend);
+               MALI_DEBUG_ASSERT(0 != mem_backend->size);
+               return ret;
+       }
+
+       if (MALI_MEM_COW != mem_backend->type) {
+               MALI_PRINT_ERROR(("_mali_ukk_mem_cow_modify_range: not supported for memory type %d !\n", mem_backend->type));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       ret =  mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
+       args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+       if (_MALI_OSK_ERR_OK != ret)
+               return  ret;
+       _mali_osk_mutex_wait(session->memory_lock);
+       if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+               mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
+       }
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
+{
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+       MALI_DEBUG_ASSERT(0 == args->psize %  MALI_MMU_PAGE_SIZE);
+
+       /* Get the memory backend that needs to be resized. */
+       mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+       if (NULL == mem_backend)  {
+               MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+               return ret;
+       }
+
+       MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
+
+       ret = mali_mem_resize(session, mem_backend, args->psize);
+
+       return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
+{
+       args->memory_usage = _mali_ukk_report_memory_usage();
+       if (0 != args->vaddr) {
+               mali_mem_backend *mem_backend = NULL;
+               struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+               /* Get the backend that needs to be modified. */
+               mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+               if (NULL == mem_backend) {
+                       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               if (MALI_MEM_COW == mem_backend->type)
+                       args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+       }
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/utgard/r8p0/linux/mali_memory_manager.h b/utgard/r8p0/linux/mali_memory_manager.h
new file mode 100644 (file)
index 0000000..caccd9e
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_MANAGER_H__
+#define __MALI_MEMORY_MANAGER_H__
+
+#include "mali_osk.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_allocation_manager {
+       rwlock_t vm_lock;
+       struct rb_root allocation_mgr_rb;
+       struct list_head head;
+       struct mutex list_mutex;
+       u32 mali_allocation_num;
+};
+
+extern struct idr mali_backend_idr;
+extern struct mutex mali_idr_mutex;
+
+int mali_memory_manager_init(struct mali_allocation_manager *mgr);
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr);
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc);
+
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size);
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address);
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args);
+
+
+#endif
+
diff --git a/utgard/r8p0/linux/mali_memory_os_alloc.c b/utgard/r8p0/linux/mali_memory_os_alloc.c
new file mode 100644 (file)
index 0000000..73ca64b
--- /dev/null
@@ -0,0 +1,847 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+static unsigned long dma_attrs_wc = 0;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+#endif
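+/*
+ * The shrinker callback signature matches the kernel in use: a bare
+ * (nr_to_scan, gfp_mask) pair before 2.6.35, the same pair plus a
+ * struct shrinker * up to 3.0, a struct shrink_control from 3.0, and
+ * split count_objects/scan_objects callbacks from 3.12, as selected by
+ * the #if ladder above.
+ */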
+static void mali_mem_os_trim_pool(struct work_struct *work);
+extern void show_mem(unsigned int flags);
+
+struct mali_mem_os_allocator mali_mem_os_allocator = {
+       .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+       .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+       .pool_count = 0,
+
+       .allocated_pages = ATOMIC_INIT(0),
+       .allocation_limit = 0,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+       .shrinker.shrink = mali_mem_os_shrink,
+#else
+       .shrinker.count_objects = mali_mem_os_shrink_count,
+       .shrinker.scan_objects = mali_mem_os_shrink,
+#endif
+       .shrinker.seeks = DEFAULT_SEEKS,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+       .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+       .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+       .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
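+/* Free the pages of an OS memory backend. For CoW-shared lists only pages
+ * whose reference count is down to one are actually freed; other nodes are
+ * just unreferenced. Freed pages go back on the page pool, and a pool trim
+ * is scheduled once the pool exceeds the kernel buffer size. Returns the
+ * number of pages returned to the pool. */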
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
+{
+       LIST_HEAD(pages);
+       struct mali_page_node *m_page, *m_tmp;
+       u32 free_pages_nr = 0;
+
+       if (MALI_TRUE == cow_flag) {
+               list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
+                       /* Only handle OS page nodes here. */
+                       if (m_page->type == MALI_PAGE_NODE_OS) {
+                               if (1 == _mali_page_node_get_ref_count(m_page)) {
+                                       list_move(&m_page->list, &pages);
+                                       atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+                                       free_pages_nr++;
+                               } else {
+                                       _mali_page_node_unref(m_page);
+                                       m_page->page = NULL;
+                                       list_del(&m_page->list);
+                                       kfree(m_page);
+                               }
+                       }
+               }
+       } else {
+               list_cut_position(&pages, os_pages, os_pages->prev);
+               atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
+               free_pages_nr = pages_count;
+       }
+
+       /* Put pages on pool. */
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+       list_splice(&pages, &mali_mem_os_allocator.pool_pages);
+       mali_mem_os_allocator.pool_count += free_pages_nr;
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+               queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+       }
+       return free_pages_nr;
+}
+
+/**
+ * Put (release) a page without adding it to the page pool.
+ */
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
+{
+       MALI_DEBUG_ASSERT_POINTER(page);
+       if (1 == page_count(page)) {
+               atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+               dma_unmap_page(&mali_platform_device->dev, page_private(page),
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+               ClearPagePrivate(page);
+       }
+       put_page(page);
+       return _MALI_OSK_ERR_OK;
+}
+
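+/* Move page_count pages, starting at start_page, from mem_from to mem_to,
+ * adjusting both counters. Returns _MALI_OSK_ERR_INVALID_ARGS when the
+ * requested range lies outside mem_from. */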
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       u32 i = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(mem_from);
+       MALI_DEBUG_ASSERT_POINTER(mem_to);
+
+       if (mem_from->count < start_page + page_count) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) {
+               if (i >= start_page && i < start_page + page_count) {
+                       list_move_tail(&m_page->list, &mem_to->pages);
+                       mem_from->count--;
+                       mem_to->count++;
+               }
+               i++;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+
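+/* Allocate the pages backing an OS memory allocation: pages are taken from
+ * the page pool first, and any remainder is allocated with alloc_page()
+ * and DMA-mapped. The map/unmap/map sequence flushes each new page from
+ * the CPU caches, and the final DMA address is stored in page->private.
+ * On failure, everything allocated so far is freed again. */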
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
+{
+       struct page *new_page;
+       LIST_HEAD(pages_list);
+       size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+       size_t remaining = page_count;
+       struct mali_page_node *m_page, *m_tmp;
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+       if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+               MALI_PRINT_ERROR( ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+                                    size,
+                                    atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+                                    mali_mem_os_allocator.allocation_limit));
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&os_mem->pages);
+       os_mem->count = page_count;
+
+       /* Grab pages from pool. */
+       {
+               size_t pool_pages;
+               spin_lock(&mali_mem_os_allocator.pool_lock);
+               pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
+               for (i = pool_pages; i > 0; i--) {
+                       BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
+                       list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
+               }
+               mali_mem_os_allocator.pool_count -= pool_pages;
+               remaining -= pool_pages;
+               spin_unlock(&mali_mem_os_allocator.pool_lock);
+       }
+
+       /* Process pages from pool. */
+       i = 0;
+       list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
+               BUG_ON(NULL == m_page);
+
+               list_move_tail(&m_page->list, &os_mem->pages);
+       }
+
+       /* Allocate new pages, if needed. */
+       for (i = 0; i < remaining; i++) {
+               dma_addr_t dma_addr;
+               gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
+               int err;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+               flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+               flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+               flags |= GFP_DMA;
+#else
+               /* arm64 Utgard only works with memory below 4G, but the
+                * kernel does not provide a method to allocate memory
+                * below 4G.
+                */
+               MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+               new_page = alloc_page(flags);
+               if (new_page == NULL) {
+                       new_page = alloc_page(flags | GFP_KERNEL);
+               }
+               if (unlikely(NULL == new_page)) {
+                       MALI_PRINT_ERROR(("alloc_page() returned NULL! Please check kernel memory!"));
+                       /* Calculate the number of pages actually allocated, and free them. */
+#ifdef AML_MALI_DEBUG
+                       MALI_PRINT_ERROR(("alloc_page() return NULL\n"));
+                       show_mem(SHOW_MEM_FILTER_NODES);
+#endif
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+                       return -ENOMEM;
+               }
+
+               /* Ensure page is flushed from CPU caches. */
+               dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+               dma_unmap_page(&mali_platform_device->dev, dma_addr,
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+               dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+               err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+               if (unlikely(err)) {
+                       MALI_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+                                               new_page, err));
+                       __free_page(new_page);
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+                       return -EFAULT;
+               }
+
+               /* Store page phys addr */
+               SetPagePrivate(new_page);
+               set_page_private(new_page, dma_addr);
+
+               m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+               if (unlikely(NULL == m_page)) {
+#ifdef AML_MALI_DEBUG
+                       show_mem(SHOW_MEM_FILTER_NODES);
+#endif
+                       MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n"));
+                       dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
+                                      _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       ClearPagePrivate(new_page);
+                       __free_page(new_page);
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+                       return -EFAULT;
+               }
+               m_page->page = new_page;
+
+               list_add_tail(&m_page->list, &os_mem->pages);
+       }
+
+       atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+//             MALI_PRINT_ERROR( ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+               cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+       }
+
+       return 0;
+}
+
+
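+/* Map (part of) an OS memory backend into the Mali virtual address space.
+ * When the requested range ends at the last page of the backend, the page
+ * list is walked from the tail so the pages before start_page never have
+ * to be traversed. */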
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct mali_page_node *m_page;
+       u32 virt;
+       u32 prop = props;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+       MALI_DEBUG_ASSERT(start_page <= os_mem->count);
+       MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count);
+
+       if ((start_page + mapping_pgae_num) == os_mem->count) {
+
+               virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num);
+
+               list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+                       virt -= MALI_MMU_PAGE_SIZE;
+                       if (mapping_pgae_num > 0) {
+                               dma_addr_t phys = page_private(m_page->page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+                               /* Verify that the "physical" address is 32-bit and
+                               * usable for Mali, when on a system with bus addresses
+                               * wider than 32-bit. */
+                               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+                               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+                       } else {
+                               break;
+                       }
+                       mapping_pgae_num--;
+               }
+
+       } else {
+               u32 i = 0;
+               virt = vaddr;
+               list_for_each_entry(m_page, &os_mem->pages, list) {
+
+                       if (i >= start_page) {
+                               dma_addr_t phys = page_private(m_page->page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+                               /* Verify that the "physical" address is 32-bit and
+                               * usable for Mali, when on a system with bus addresses
+                               * wider than 32-bit. */
+                               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+                               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+                       }
+                       i++;
+                       virt += MALI_MMU_PAGE_SIZE;
+               }
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
+{
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
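+/* Map an OS memory backend into a user-space VMA, inserting one PFN per
+ * page. */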
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+       mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+       struct mali_page_node *m_page;
+       struct page *page;
+       int ret;
+       unsigned long addr = vma->vm_start;
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+       list_for_each_entry(m_page, &os_mem->pages, list) {
+               /* We should use vm_insert_page, but it does a dcache flush
+                * which makes it way slower than remap_pfn_range or vm_insert_pfn:
+                *      ret = vm_insert_page(vma, addr, page);
+                */
+               page = m_page->page;
+               ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
+
+               if (unlikely(0 != ret)) {
+                       return -EFAULT;
+               }
+               addr += _MALI_OSK_MALI_PAGE_SIZE;
+       }
+
+       return 0;
+}
+
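+/* CPU-map the pages of a resized OS backend into the given VMA. -EBUSY
+ * from vm_insert_pfn() means the page is already mapped and is treated as
+ * success. The caller is expected to hold the backend lock (hence the
+ * _locked suffix). */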
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size)
+{
+       mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+       struct mali_page_node *m_page;
+       int ret;
+       int offset;
+       int mapping_page_num;
+       int count;
+
+       unsigned long vstart = vma->vm_start;
+       count = 0;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+       MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+       MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+       offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+       MALI_DEBUG_ASSERT(offset <= os_mem->count);
+       mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE;
+       MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count);
+
+       if ((offset + mapping_page_num) == os_mem->count) {
+
+               unsigned long vm_end = start_vaddr + mappig_size;
+
+               list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+                       vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
+                       if (mapping_page_num > 0) {
+                               ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+
+                               if (unlikely(0 != ret)) {
+                                       /* vm_insert_pfn() returns -EBUSY if the page was already mapped, which is OK. */
+                                       if (-EBUSY == ret) {
+                                               break;
+                                       } else {
+                                               MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n",
+                                                                    ret,  offset + mapping_page_num, os_mem->count));
+                                       }
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+                       } else {
+                               break;
+                       }
+                       mapping_page_num--;
+
+               }
+       } else {
+
+               list_for_each_entry(m_page, &os_mem->pages, list) {
+                       if (count >= offset) {
+
+                               ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+
+                               if (unlikely(0 != ret)) {
+                                       /* vm_insert_pfn() returns -EBUSY if the page was already mapped, which is OK. */
+                                       if (-EBUSY == ret) {
+                                               break;
+                                       } else {
+                                               MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n",
+                                                                    ret, count, offset, os_mem->count));
+                                       }
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+                       }
+                       count++;
+                       vstart += _MALI_OSK_MALI_PAGE_SIZE;
+               }
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
+{
+
+       mali_mem_allocation *alloc;
+       struct mali_session_data *session;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+       alloc = mem_bkend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_os_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       /* Free pages */
+       if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
+               /* Lock to avoid the free race condition for the cow shared memory page node. */
+               _mali_osk_mutex_wait(session->cow_lock);
+               free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
+               _mali_osk_mutex_signal(session->cow_lock);
+       } else {
+               free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
+       }
+       mutex_unlock(&mem_bkend->mutex);
+
+       MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+       mem_bkend->os_mem.count = 0;
+       return free_pages_nr;
+}
+
+
+#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
+static struct {
+       struct {
+               mali_dma_addr phys;
+               mali_io_address mapping;
+       } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
+       size_t count;
+       spinlock_t lock;
+} mali_mem_page_table_page_pool = {
+       .count = 0,
+       .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+};
+
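+/* Get a page to use as an MMU page table: taken from the table page pool
+ * when possible, otherwise allocated as write-combined DMA memory. */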
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+       dma_addr_t tmp_phys;
+
+       spin_lock(&mali_mem_page_table_page_pool.lock);
+       if (0 < mali_mem_page_table_page_pool.count) {
+               u32 i = --mali_mem_page_table_page_pool.count;
+               *phys = mali_mem_page_table_page_pool.page[i].phys;
+               *mapping = mali_mem_page_table_page_pool.page[i].mapping;
+
+               ret = _MALI_OSK_ERR_OK;
+       }
+       spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+       if (_MALI_OSK_ERR_OK != ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+               *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+                                          _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+                                          GFP_KERNEL, dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+                                          _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+                                          GFP_KERNEL, &dma_attrs_wc);
+#else
+               *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+                                                 _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
+               if (NULL != *mapping) {
+                       ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+                       /* Verify that the "physical" address is 32-bit and
+                        * usable for Mali, when on a system with bus addresses
+                        * wider than 32-bit. */
+                       MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+                       *phys = (mali_dma_addr)tmp_phys;
+               }
+       }
+#ifdef AML_MALI_DEBUG
+       if (ret != _MALI_OSK_ERR_OK) {
+               MALI_PRINT_ERROR(("dma_alloc_attrs() returned NULL\n"));
+               show_mem(SHOW_MEM_FILTER_NODES);
+       }
+#endif
+
+       return ret;
+}
+
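+/* Return a page table page to the pool, or free it through the DMA API
+ * when the pool is already full. */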
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
+{
+       spin_lock(&mali_mem_page_table_page_pool.lock);
+       if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
+               u32 i = mali_mem_page_table_page_pool.count;
+               mali_mem_page_table_page_pool.page[i].phys = phys;
+               mali_mem_page_table_page_pool.page[i].mapping = virt;
+
+               ++mali_mem_page_table_page_pool.count;
+
+               spin_unlock(&mali_mem_page_table_page_pool.lock);
+       } else {
+               spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev,
+                            _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+                            dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev,
+                              _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+                              &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
+       }
+}
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page)
+{
+       struct page *page = m_page->page;
+       MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);
+
+       if (1 == page_count(page)) {
+               dma_unmap_page(&mali_platform_device->dev, page_private(page),
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+               ClearPagePrivate(page);
+       }
+       __free_page(page);
+       m_page->page = NULL;
+       list_del(&m_page->list);
+       kfree(m_page);
+}
+
+/* The maximum number of page table pool pages to free in one go. */
+#define MALI_MEM_OS_CHUNK_TO_FREE 64UL
+
+/* Free a certain number of pages from the page table page pool.
+ * The pool lock must be held when calling the function, and the lock will be
+ * released before returning.
+ */
+static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
+{
+       mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       u32 i;
+
+       MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
+
+       /* Remove nr_to_free pages from the pool and store them locally on stack. */
+       for (i = 0; i < nr_to_free; i++) {
+               u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
+
+               phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
+               virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
+       }
+
+       mali_mem_page_table_page_pool.count -= nr_to_free;
+
+       spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+       /* After releasing the spinlock: free the pages we removed from the pool. */
+       for (i = 0; i < nr_to_free; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+                              virt_arr[i], (dma_addr_t)phys_arr[i], dma_attrs_wc);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+                              virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE,
+                                     virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
+       }
+}
+
+static void mali_mem_os_trim_page_table_page_pool(void)
+{
+       size_t nr_to_free = 0;
+       size_t nr_to_keep;
+
+       /* Keep 2 page table pages for each 1024 pages in the page cache. */
+       nr_to_keep = mali_mem_os_allocator.pool_count / 512;
+       /* And a minimum of eight pages, to accommodate new sessions. */
+       nr_to_keep += 8;
+
+       if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
+
+       if (nr_to_keep < mali_mem_page_table_page_pool.count) {
+               nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
+               nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
+       }
+
+       /* Pool lock will be released by the callee. */
+       mali_mem_os_page_table_pool_free(nr_to_free);
+}
+
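+/* Shrinker callbacks: report how many pages sit in the page pool and
+ * release pages from it under memory pressure. On kernels older than 3.12
+ * the single shrink entry point handles both counting and scanning. */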
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       return mali_mem_os_allocator.pool_count;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+#endif /* Linux < 2.6.35 */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif /* Linux < 3.12.0 */
+#endif /* Linux < 3.0.0 */
+{
+       struct mali_page_node *m_page, *m_tmp;
+       unsigned long flags;
+       struct list_head *le, pages;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+       int nr = nr_to_scan;
+#else
+       int nr = sc->nr_to_scan;
+#endif
+
+       if (0 == nr) {
+               return mali_mem_os_shrink_count(shrinker, sc);
+       }
+
+       if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
+               /* Not able to lock. */
+               return -1;
+       }
+
+       if (0 == mali_mem_os_allocator.pool_count) {
+               /* No pages available. */
+               spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+               return 0;
+       }
+
+       /* Release from general page pool */
+       nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
+       mali_mem_os_allocator.pool_count -= nr;
+       list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+               --nr;
+               if (0 == nr) break;
+       }
+       list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+       spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+
+       list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+               mali_mem_os_free_page_node(m_page);
+       }
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+               /* Pool is below the kernel buffer size; stop the trim timer. */
+               MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+               cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+       }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+       return mali_mem_os_shrink_count(shrinker, sc);
+#else
+       return nr;
+#endif
+}
+
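+/* Deferred work: trim the page pool back towards the kernel buffer size,
+ * trim the page table page pool, and re-queue itself while the page pool
+ * is still above the limit. */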
+static void mali_mem_os_trim_pool(struct work_struct *data)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       struct list_head *le;
+       LIST_HEAD(pages);
+       size_t nr_to_free;
+
+       MALI_IGNORE(data);
+
+       MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
+
+       /* Release from general page pool */
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+               const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
+
+               /* Free half of the pages above the static limit, but at least 64 pages (256KB). */
+               nr_to_free = max(count / 2, min_to_free);
+
+               mali_mem_os_allocator.pool_count -= nr_to_free;
+               list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+                       --nr_to_free;
+                       if (0 == nr_to_free) break;
+               }
+               list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+       }
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+               mali_mem_os_free_page_node(m_page);
+       }
+
+       /* Release some pages from page table page pool */
+       mali_mem_os_trim_page_table_page_pool();
+
+       if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+               MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+               queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+       }
+}
+
+_mali_osk_errcode_t mali_mem_os_init(void)
+{
+       mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
+       if (NULL == mali_mem_os_allocator.wq) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+#if LINUX_VERSION_CODE >=  KERNEL_VERSION(4, 8, 0)
+       dma_attrs_wc = DMA_ATTR_WRITE_COMBINE;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
+       register_shrinker(&mali_mem_os_allocator.shrinker);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_os_term(void)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       unregister_shrinker(&mali_mem_os_allocator.shrinker);
+       cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
+
+       if (NULL != mali_mem_os_allocator.wq) {
+               destroy_workqueue(mali_mem_os_allocator.wq);
+               mali_mem_os_allocator.wq = NULL;
+       }
+
+       spin_lock(&mali_mem_os_allocator.pool_lock);
+       list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
+               mali_mem_os_free_page_node(m_page);
+
+               --mali_mem_os_allocator.pool_count;
+       }
+       BUG_ON(mali_mem_os_allocator.pool_count);
+       spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+       /* Release from page table page pool */
+       do {
+               u32 nr_to_free;
+
+               spin_lock(&mali_mem_page_table_page_pool.lock);
+
+               nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
+
+               /* Pool lock will be released by the callee. */
+               mali_mem_os_page_table_pool_free(nr_to_free);
+       } while (0 != mali_mem_page_table_page_pool.count);
+}
+
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+       mali_mem_os_allocator.allocation_limit = size;
+
+       MALI_SUCCESS;
+}
+
+u32 mali_mem_os_stat(void)
+{
+       return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/utgard/r8p0/linux/mali_memory_os_alloc.h b/utgard/r8p0/linux/mali_memory_os_alloc.h
new file mode 100755 (executable)
index 0000000..b92fffe
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_OS_ALLOC_H__
+#define __MALI_MEMORY_OS_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_memory_types.h"
+
+
+/** @brief Release Mali OS memory
+ *
+ * The session memory_lock must be held when calling this function.
+ *
+ * @param mem_bkend Pointer to the mali_mem_backend to release
+ */
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend);
+
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
+
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
+
+_mali_osk_errcode_t mali_mem_os_init(void);
+
+void mali_mem_os_term(void);
+
+u32 mali_mem_os_stat(void);
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page);
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size);
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag);
+
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page);
+
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count);
+
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props);
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size);
+
+#endif /* __MALI_MEMORY_OS_ALLOC_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_secure.c b/utgard/r8p0/linux/mali_memory_secure.c
new file mode 100644 (file)
index 0000000..7856ae6
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_secure.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+
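+/* Attach the dma-buf identified by mem_fd to a Mali secure memory backend
+ * and map it for device access. The dma-buf size must match the requested
+ * allocation size. */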
+_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd)
+{
+       struct dma_buf *buf;
+       MALI_DEBUG_ASSERT_POINTER(secure_mem);
+
+       /* get dma buffer */
+       buf = dma_buf_get(mem_fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf!\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (size != buf->size) {
+               MALI_DEBUG_PRINT_ERROR(("The secure mem size does not match the dma buf size!\n"));
+               goto failed_alloc_mem;
+       }
+
+       secure_mem->buf = buf;
+       secure_mem->attachment = dma_buf_attach(secure_mem->buf, &mali_platform_device->dev);
+       if (NULL == secure_mem->attachment) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf attachment!\n"));
+               goto failed_dma_attach;
+       }
+
+       secure_mem->sgt = dma_buf_map_attachment(secure_mem->attachment, DMA_BIDIRECTIONAL);
+       if (IS_ERR_OR_NULL(secure_mem->sgt)) {
+               MALI_DEBUG_PRINT_ERROR(("Failed to map dma buf attachment\n"));
+               goto failed_dma_map;
+       }
+
+       secure_mem->count = size / MALI_MMU_PAGE_SIZE;
+
+       return _MALI_OSK_ERR_OK;
+
+failed_dma_map:
+       dma_buf_detach(secure_mem->buf, secure_mem->attachment);
+failed_dma_attach:
+failed_alloc_mem:
+       dma_buf_put(buf);
+       return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+       struct mali_page_directory *pagedir;
+       struct scatterlist *sg;
+       u32 virt = vaddr;
+       u32 prop = props;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(secure_mem);
+       MALI_DEBUG_ASSERT_POINTER(secure_mem->sgt);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       pagedir = session->page_directory;
+
+       for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
+               u32 size = sg_dma_len(sg);
+               dma_addr_t phys = sg_dma_address(sg);
+
+               /* sg must be page aligned. */
+               MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+               MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
+
+               mali_mmu_pagedir_update(pagedir, virt, phys, size, prop);
+
+               MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x gpu virtual address: 0x%x! \n", phys, virt));
+               virt += size;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc)
+{
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
+
+int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+
+       int ret = 0;
+       struct scatterlist *sg;
+       mali_mem_secure *secure_mem = &mem_bkend->secure_mem;
+       unsigned long addr = vma->vm_start;
+       int i;
+
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);
+
+       for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) {
+               phys_addr_t phys;
+               dma_addr_t dev_addr;
+               u32 size, j;
+               dev_addr = sg_dma_address(sg);
+#if defined(CONFIG_ARM64) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+               phys = dma_to_phys(&mali_platform_device->dev, dev_addr);
+#else
+               phys = page_to_phys(pfn_to_page(dma_to_pfn(&mali_platform_device->dev, dev_addr)));
+#endif
+               size = sg_dma_len(sg);
+               MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);
+
+               for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
+                       ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
+
+                       if (unlikely(0 != ret)) {
+                               return -EFAULT;
+                       }
+                       MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x, cpu virtual address: 0x%x!\n", phys, addr));
+
+                       addr += _MALI_OSK_MALI_PAGE_SIZE;
+                       phys += _MALI_OSK_MALI_PAGE_SIZE;
+               }
+       }
+       return ret;
+}
+
+u32 mali_mem_secure_release(mali_mem_backend *mem_bkend)
+{
+       struct mali_mem_secure *mem;
+       mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE);
+
+       mem = &mem_bkend->secure_mem;
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       MALI_DEBUG_ASSERT_POINTER(mem->sgt);
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_secure_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+       dma_buf_detach(mem->buf, mem->attachment);
+       dma_buf_put(mem->buf);
+       mutex_unlock(&mem_bkend->mutex);
+
+       free_pages_nr = mem->count;
+
+       return free_pages_nr;
+}
+
+
diff --git a/utgard/r8p0/linux/mali_memory_secure.h b/utgard/r8p0/linux/mali_memory_secure.h
new file mode 100644 (file)
index 0000000..cb85767
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_SECURE_H__
+#define __MALI_MEMORY_SECURE_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+#include <linux/spinlock.h>
+
+#include "mali_memory_types.h"
+
+_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd);
+
+_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+
+void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
+u32 mali_mem_secure_release(mali_mem_backend *mem_bkend);
+
+#endif /* __MALI_MEMORY_SECURE_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_swap_alloc.c b/utgard/r8p0/linux/mali_memory_swap_alloc.c
new file mode 100644 (file)
index 0000000..132bad4
--- /dev/null
@@ -0,0 +1,950 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/shmem_fs.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_memory.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_ukk.h"
+#include "mali_kernel_utilization.h"
+#include "mali_memory_swap_alloc.h"
+
+
+static struct _mali_osk_bitmap idx_mgr;
+static struct file *global_swap_file;
+static struct address_space *global_swap_space;
+static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL;
+static u32 mem_backend_swapped_pool_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+static u32 mem_backend_swapped_unlock_size;
+#endif
+/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock.
+ * This lock is used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */
+static struct mutex mem_backend_swapped_pool_lock;
+static struct list_head mem_backend_swapped_pool;
+
+extern struct mali_mem_os_allocator mali_mem_os_allocator;
+
+#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024)
+#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0)               /* Used to mark the given memory cookie as invalid. */
+#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_SHIFT)
+#else
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_CACHE_SHIFT)
+#endif
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory, the space size is 128MB. */
+
+unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE;
+
+/**
+ * There are two situations in which we shrink the swapped backend pool: when
+ * GPU utilization is low, which indicates the GPU will not touch the
+ * swappable backends in the short term, and when newly added swappable
+ * backends push the total pool size past the swapped pool threshold.
+ */
+typedef enum {
+       MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100,
+       MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257,
+} _mali_mem_swap_pool_shrink_type_t;
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg);
+
+_mali_osk_errcode_t mali_mem_swap_init(void)
+{
+       gfp_t flags = __GFP_NORETRY | __GFP_NOWARN;
+
+       if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE);
+       if (IS_ERR(global_swap_file)) {
+               _mali_osk_bitmap_term(&idx_mgr);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping;
+
+       mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL);
+       if (NULL == mali_mem_swap_out_workq) {
+               _mali_osk_bitmap_term(&idx_mgr);
+               fput(global_swap_file);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+       flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+       flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+       flags |= GFP_DMA;
+#else
+       /* arm64 Utgard only works with memory below 4G, but the
+        * kernel does not provide a method to allocate memory
+        * below 4G.
+        */
+       MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+       /* When we use shmem_read_mapping_page to allocate or swap in, it will
+        * use these flags to allocate a new page if needed. */
+       mapping_set_gfp_mask(global_swap_space, flags);
+
+       mem_backend_swapped_pool_size = 0;
+#ifdef MALI_MEM_SWAP_TRACKING
+       mem_backend_swapped_unlock_size = 0;
+#endif
+       mutex_init(&mem_backend_swapped_pool_lock);
+       INIT_LIST_HEAD(&mem_backend_swapped_pool);
+
+       MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold value is %uM\n", mali_mem_swap_out_threshold_value >> 20));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_swap_term(void)
+{
+       _mali_osk_bitmap_term(&idx_mgr);
+
+       fput(global_swap_file);
+
+       _mali_osk_wq_delete_work(mali_mem_swap_out_workq);
+
+       MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool));
+       MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size);
+
+       return;
+}
+
+struct file *mali_mem_swap_get_global_swap_file(void)
+{
+       return  global_swap_file;
+}
+
+/* Check whether a swappable backend is currently in the swapped pool. */
+static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend)
+{
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+       return !list_empty(&mem_bkend->list);
+}
+
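+/* Remove a backend from the global swapped pool, if it is there, and
+ * subtract its size from the accounted pool size. */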
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend)
+{
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+       mutex_lock(&mem_backend_swapped_pool_lock);
+       mutex_lock(&mem_bkend->mutex);
+
+       if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+               mutex_unlock(&mem_bkend->mutex);
+               mutex_unlock(&mem_backend_swapped_pool_lock);
+               return;
+       }
+
+       MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+       list_del_init(&mem_bkend->list);
+
+       mutex_unlock(&mem_bkend->mutex);
+
+       mem_backend_swapped_pool_size -= mem_bkend->size;
+
+       mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
+static void mali_mem_swap_out_page_node(mali_page_node *page_node)
+{
+       MALI_DEBUG_ASSERT(page_node);
+
+       dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr,
+                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+       set_page_dirty(page_node->swap_it->page);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+       put_page(page_node->swap_it->page);
+#else
+       page_cache_release(page_node->swap_it->page);
+#endif
+}
+
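+/* Unlock a swappable backend: DMA-unmap every page, mark it dirty and drop
+ * the page reference so it can be swapped out again. The backend mutex must
+ * be held by the caller. */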
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend)
+{
+       mali_page_node *m_page;
+
+       MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+       if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) {
+               return;
+       }
+
+       mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+       list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+               mali_mem_swap_out_page_node(m_page);
+       }
+
+       return;
+}
+
+static void mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node)
+{
+       mali_page_node *m_page;
+
+       MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+       list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+               if (m_page == page_node) {
+                       break;
+               }
+               mali_mem_swap_out_page_node(m_page);
+       }
+}
+
+static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type)
+{
+       mali_mem_backend *bkend, *tmp_bkend;
+       long system_free_size;
+       u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value;
+
+       MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock));
+
+       if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) {
+               /**
+                * When system memory is very low, the swappable memory size locked
+                * by Mali is below the threshold value, and at the same time GPU
+                * load is very low and high performance is not needed, we can
+                * unlock more swap memory backends from the swapped backends pool.
+                */
+               gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION;
+               temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2);
+       } else {
+               /* When adding swappable memory backends to the swapped pool, the
+                * driver must not hold too many swappable backends, while still
+                * taking performance into account. To balance swapping out memory
+                * backends, the following conditions must hold:
+                * 1. The total memory size in the global swapped backend pool
+                *    exceeds the defined threshold value.
+                * 2. The system-level free memory size is below the defined
+                *    threshold value.
+                * Note that GPU utilization is not considered in this case.
+                */
+               gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS;
+               temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value;
+       }
+
+       /* Get the system free memory size in bytes. */
+       system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+       last_gpu_utilization = _mali_ukk_utilization_gp_pp();
+
+       if ((last_gpu_utilization < gpu_utilization_threshold_value)
+           && (system_free_size < mali_mem_swap_out_threshold_value)
+           && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) {
+               list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) {
+                       if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) {
+                               break;
+                       }
+
+                       mutex_lock(&bkend->mutex);
+
+                       /* check if backend is in use. */
+                       if (0 < bkend->using_count) {
+                               mutex_unlock(&bkend->mutex);
+                               continue;
+                       }
+
+                       mali_mem_swap_unlock_single_mem_backend(bkend);
+                       list_del_init(&bkend->list);
+                       mem_backend_swapped_pool_size -= bkend->size;
+#ifdef MALI_MEM_SWAP_TRACKING
+                       mem_backend_swapped_unlock_size += bkend->size;
+#endif
+                       mutex_unlock(&bkend->mutex);
+               }
+       }
+
+       return;
+}
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg)
+{
+       MALI_IGNORE(arg);
+
+       mutex_lock(&mem_backend_swapped_pool_lock);
+
+       mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION);
+
+       mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
+/**
+ * After a PP job finishes, all swappable memory backends used by that job are
+ * added to the tail of the global swapped pool; if the total size of
+ * swappable memory exceeds the threshold value, the swapped pool is also
+ * shrunk, starting from the head of the list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend)
+{
+       mutex_lock(&mem_backend_swapped_pool_lock);
+       mutex_lock(&mem_bkend->mutex);
+
+       if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+               MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+               list_del_init(&mem_bkend->list);
+               list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+               mutex_unlock(&mem_bkend->mutex);
+               mutex_unlock(&mem_backend_swapped_pool_lock);
+               return;
+       }
+
+       list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+
+       mutex_unlock(&mem_bkend->mutex);
+       mem_backend_swapped_pool_size += mem_bkend->size;
+
+       mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS);
+
+       mutex_unlock(&mem_backend_swapped_pool_lock);
+       return;
+}
+
+
+u32 mali_mem_swap_idx_alloc(void)
+{
+       return _mali_osk_bitmap_alloc(&idx_mgr);
+}
+
+void mali_mem_swap_idx_free(u32 idx)
+{
+       _mali_osk_bitmap_free(&idx_mgr, idx);
+}
+
+static u32 mali_mem_swap_idx_range_alloc(u32 count)
+{
+       u32 index;
+
+       index = _mali_osk_bitmap_alloc_range(&idx_mgr, count);
+
+       return index;
+}
+
+static void mali_mem_swap_idx_range_free(u32 idx, int num)
+{
+       _mali_osk_bitmap_free_range(&idx_mgr, idx, num);
+}
+
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void)
+{
+       mali_swap_item *swap_item;
+
+       swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL);
+
+       if (NULL == swap_item) {
+               return NULL;
+       }
+
+       atomic_set(&swap_item->ref_count, 1);
+       swap_item->page = NULL;
+       atomic_add(1, &mali_mem_os_allocator.allocated_pages);
+
+       return swap_item;
+}
+
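+/* Drop one reference on a swap item. When the last reference is gone the
+ * backing page is truncated out of the global swap file, the swap index is
+ * returned to the bitmap, and the item is freed. */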
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item)
+{
+       struct inode *file_node;
+       long long start, end;
+
+       /* If this swap item is shared, we just reduce the reference counter. */
+       if (0 == atomic_dec_return(&swap_item->ref_count)) {
+               file_node = global_swap_file->f_path.dentry->d_inode;
+               start = swap_item->idx;
+               start = start << 12;
+               end = start + PAGE_SIZE;
+
+               shmem_truncate_range(file_node, start, (end - 1));
+
+               mali_mem_swap_idx_free(swap_item->idx);
+
+               atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+
+               kfree(swap_item);
+       }
+}
+
+/* Used to allocate new swap item for new memory allocation and cow page for write. */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void)
+{
+       struct mali_page_node *m_page;
+
+       m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+       if (NULL == m_page) {
+               return NULL;
+       }
+
+       m_page->swap_it = mali_mem_swap_alloc_swap_item();
+
+       if (NULL == m_page->swap_it) {
+               kfree(m_page);
+               return NULL;
+       }
+
+       return m_page;
+}
+
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page)
+{
+
+       mali_mem_swap_free_swap_item(m_page->swap_it);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page)
+{
+       _mali_mem_swap_put_page_node(m_page);
+
+       kfree(m_page);
+
+       return;
+}
+
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       u32 free_pages_nr = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(swap_mem);
+
+       list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+               /* Free the page node and release the swap item; if the ref count is 1,
+                * the swap item itself is freed as well. */
+               list_del(&m_page->list);
+               if (1 == _mali_page_node_get_ref_count(m_page)) {
+                       free_pages_nr++;
+               }
+
+               _mali_mem_swap_page_node_free(m_page);
+       }
+
+       return free_pages_nr;
+}
+
+static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       u32 free_pages_nr = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(cow_mem);
+
+       list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+               /* Free the page node and release the swap item; if the ref count is 1,
+                * the swap item itself is freed as well. */
+               list_del(&m_page->list);
+               if (1 == _mali_page_node_get_ref_count(m_page)) {
+                       free_pages_nr++;
+               }
+
+               _mali_mem_swap_page_node_free(m_page);
+       }
+
+       return free_pages_nr;
+}
+
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+       mali_mem_allocation *alloc;
+       u32 free_pages_nr = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+       alloc = mem_bkend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       if (is_mali_mapped) {
+               mali_mem_swap_mali_unmap(alloc);
+       }
+
+       mali_memory_swap_list_backend_delete(mem_bkend);
+
+       mutex_lock(&mem_bkend->mutex);
+       /* Make sure the given memory backend is unlocked on the Mali side
+        * before freeing this memory block. */
+       mali_mem_swap_unlock_single_mem_backend(mem_bkend);
+       mutex_unlock(&mem_bkend->mutex);
+
+       if (MALI_MEM_SWAP == mem_bkend->type) {
+               free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem);
+       } else {
+               free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem);
+       }
+
+       return free_pages_nr;
+}
+
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node)
+{
+       MALI_DEBUG_ASSERT(NULL != page_node);
+
+       page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx);
+
+       if (IS_ERR(page_node->swap_it->page)) {
+               MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx));
+               return MALI_FALSE;
+       }
+
+       /* Ensure page is flushed from CPU caches. */
+       page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page,
+                                      0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+       return MALI_TRUE;
+}
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx)
+{
+       size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE;
+       struct mali_page_node *m_page;
+       long system_free_size;
+       u32 i, index;
+       mali_bool ret;
+
+       MALI_DEBUG_ASSERT(NULL != swap_mem);
+       MALI_DEBUG_ASSERT(NULL != bkend_idx);
+       MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE);
+
+       if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+               MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+                                    size,
+                                    atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+                                    mali_mem_os_allocator.allocation_limit));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       INIT_LIST_HEAD(&swap_mem->pages);
+       swap_mem->count = page_count;
+       index = mali_mem_swap_idx_range_alloc(page_count);
+
+       if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) {
+               MALI_PRINT_ERROR(("Mali Swap: Failed to allocate continuous index for swappable Mali memory."));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       for (i = 0; i < page_count; i++) {
+               m_page = _mali_mem_swap_page_node_allocate();
+
+               if (NULL == m_page) {
+                       MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node."));
+                       swap_mem->count = i;
+
+                       mali_mem_swap_free(swap_mem);
+                       mali_mem_swap_idx_range_free(index + i, page_count - i);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               m_page->swap_it->idx = index + i;
+
+               ret = mali_mem_swap_in_page_node(m_page);
+
+               if (MALI_FALSE == ret) {
+                       MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Allocate new page from SHMEM file failed."));
+                       _mali_mem_swap_page_node_free(m_page);
+                       mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1);
+
+                       swap_mem->count = i;
+                       mali_mem_swap_free(swap_mem);
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+
+               list_add_tail(&m_page->list, &swap_mem->pages);
+       }
+
+       system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+
+       if ((system_free_size < mali_mem_swap_out_threshold_value)
+           && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2))
+           && mali_utilization_enabled()) {
+               _mali_osk_wq_schedule_work(mali_mem_swap_out_workq);
+       }
+
+       *bkend_idx = index;
+       return 0;
+}
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc)
+{
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
+
+/* Insert these shmem-backed pages into the Mali page table. */
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct mali_page_node *m_page;
+       dma_addr_t phys;
+       u32 virt = vaddr;
+       u32 prop = props;
+
+       list_for_each_entry(m_page, &swap_mem->pages, list) {
+               MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page);
+               phys = m_page->swap_it->dma_addr;
+
+               mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+               virt += MALI_MMU_PAGE_SIZE;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_in_pages(struct mali_pp_job *job)
+{
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       struct mali_page_node *m_page;
+       mali_bool swap_in_success = MALI_TRUE;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+       session = mali_pp_job_get_session(job);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       for (i = 0; i < num_memory_cookies; i++) {
+
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+               if (NULL == mali_vma_node) {
+                       job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+                       swap_in_success = MALI_FALSE;
+                       MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+                       continue;
+               }
+
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+               if (MALI_MEM_SWAP != mali_alloc->type &&
+                   MALI_MEM_COW != mali_alloc->type) {
+                       continue;
+               }
+
+               /* Get backend memory & Map on GPU */
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+               /* We need not hold the backend's lock here; this check is race safe. */
+               if ((MALI_MEM_COW == mem_bkend->type) &&
+                   (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+                       continue;
+               }
+
+               mutex_lock(&mem_bkend->mutex);
+
+               /* When swap_in_success is MALI_FALSE, this job has a memory backend that could not be
+                * swapped in and it will be aborted in the Mali scheduler, so here we just invalidate
+                * the memory cookies that should not be swapped out when the job is deleted. */
+               if (MALI_FALSE == swap_in_success) {
+                       job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+                       mutex_unlock(&mem_bkend->mutex);
+                       continue;
+               }
+
+               /* Before swapping in, check whether this memory backend has already been swapped in by recently flushed jobs. */
+               ++mem_bkend->using_count;
+
+               if (1 < mem_bkend->using_count) {
+                       MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+                       mutex_unlock(&mem_bkend->mutex);
+                       continue;
+               }
+
+               if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) {
+                       mutex_unlock(&mem_bkend->mutex);
+                       continue;
+               }
+
+
+               list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+                       if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) {
+                               /* Not enough memory to swap in this page, so release the pages that have
+                                * already been swapped in and mark this PP job as failed. */
+                               mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page);
+                               swap_in_success = MALI_FALSE;
+                               break;
+                       }
+               }
+
+               if (swap_in_success) {
+#ifdef MALI_MEM_SWAP_TRACKING
+                       mem_backend_swapped_unlock_size -= mem_bkend->size;
+#endif
+                       _mali_osk_mutex_wait(session->memory_lock);
+                       mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties);
+                       _mali_osk_mutex_signal(session->memory_lock);
+
+                       /* Clear the unswapped-in flag from the backend flags, marking that this backend has been swapped in. */
+                       mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN);
+                       mutex_unlock(&mem_bkend->mutex);
+               } else {
+                       --mem_bkend->using_count;
+                       /* Mark that this backend is not swapped in and need not be processed anymore. */
+                       job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+                       mutex_unlock(&mem_bkend->mutex);
+               }
+       }
+
+       job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_out_pages(struct mali_pp_job *job)
+{
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       int i;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+       session = mali_pp_job_get_session(job);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+
+       for (i = 0; i < num_memory_cookies; i++) {
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+
+               if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) {
+                       continue;
+               }
+
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+
+               if (NULL == mali_vma_node) {
+                       MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+                       continue;
+               }
+
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+               if (MALI_MEM_SWAP != mali_alloc->type &&
+                   MALI_MEM_COW != mali_alloc->type) {
+                       continue;
+               }
+
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+               /* We need not hold the backend's lock here; this check is race safe. */
+               if ((MALI_MEM_COW == mem_bkend->type) &&
+                   (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+                       continue;
+               }
+
+               mutex_lock(&mem_bkend->mutex);
+
+               MALI_DEBUG_ASSERT(0 < mem_bkend->using_count);
+
+               /* Reducing the using_count of the memory backend means fewer PP jobs are using it;
+                * when the count reaches zero, no PP job is using it and it can be put on the swap-out list. */
+               --mem_bkend->using_count;
+
+               if (0 < mem_bkend->using_count) {
+                       mutex_unlock(&mem_bkend->mutex);
+                       continue;
+               }
+               mutex_unlock(&mem_bkend->mutex);
+
+               mali_memory_swap_list_backend_add(mem_bkend);
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+       struct mali_page_node *m_page, *found_node = NULL;
+       struct page *found_page;
+       mali_mem_swap *swap = NULL;
+       mali_mem_cow *cow = NULL;
+       dma_addr_t dma_addr;
+       u32 i = 0;
+
+       if (MALI_MEM_SWAP == mem_bkend->type) {
+               swap = &mem_bkend->swap_mem;
+               list_for_each_entry(m_page, &swap->pages, list) {
+                       if (i == offset) {
+                               found_node = m_page;
+                               break;
+                       }
+                       i++;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+               MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags));
+
+               cow = &mem_bkend->cow_mem;
+               list_for_each_entry(m_page, &cow->pages, list) {
+                       if (i == offset) {
+                               found_node = m_page;
+                               break;
+                       }
+                       i++;
+               }
+       }
+
+       if (NULL == found_node) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx);
+
+       if (!IS_ERR(found_page)) {
+               lock_page(found_page);
+               dma_addr = dma_map_page(&mali_platform_device->dev, found_page,
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+               dma_unmap_page(&mali_platform_device->dev, dma_addr,
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+               *pagep = found_page;
+       } else {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+       struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL;
+       mali_mem_cow *cow = NULL;
+       u32 i = 0;
+
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+       MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED));
+       MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+       MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend));
+
+       cow = &mem_bkend->cow_mem;
+       list_for_each_entry(m_page, &cow->pages, list) {
+               if (i == offset) {
+                       found_node = m_page;
+                       break;
+               }
+               i++;
+       }
+
+       if (NULL == found_node) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       new_node = _mali_mem_swap_page_node_allocate();
+
+       if (NULL == new_node) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       new_node->swap_it->idx = mali_mem_swap_idx_alloc();
+
+       if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) {
+               MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n"));
+               kfree(new_node->swap_it);
+               kfree(new_node);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) {
+               _mali_mem_swap_page_node_free(new_node);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       /* Swap in the found node so its contents can be copied in the kernel. */
+       if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) {
+               mali_mem_swap_out_page_node(new_node);
+               _mali_mem_swap_page_node_free(new_node);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       _mali_mem_cow_copy_page(found_node, new_node);
+
+       list_replace(&found_node->list, &new_node->list);
+
+       if (1 != _mali_page_node_get_ref_count(found_node)) {
+               atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages);
+               if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) {
+                       mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+               }
+               mem_bkend->cow_mem.change_pages_nr++;
+       }
+
+       mali_mem_swap_out_page_node(found_node);
+       _mali_mem_swap_page_node_free(found_node);
+
+       /* When swapping in the new page node we called dma_map_page for this page, so unmap it here. */
+       dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr,
+                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+       lock_page(new_node->swap_it->page);
+
+       *pagep = new_node->swap_it->page;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size)
+{
+       *swap_pool_size = mem_backend_swapped_pool_size;
+       *unlock_size =  mem_backend_swapped_unlock_size;
+}
+#endif
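
Taken together, the swap-in and swap-out paths above bracket each PP job's execution. A minimal sketch of the intended calling order, assuming a hypothetical run_pp_job_hw() dispatch helper (the real scheduler path lives elsewhere in the driver):

    static void example_dispatch(struct mali_pp_job *job)
    {
            /* Swap in and GPU-map every swappable backend the job references;
             * this sets job->swap_status to MALI_SWAP_IN_SUCC or MALI_SWAP_IN_FAIL. */
            mali_mem_swap_in_pages(job);

            if (MALI_SWAP_IN_SUCC == job->swap_status)
                    run_pp_job_hw(job);  /* hypothetical dispatch helper */

            /* Drop the job's usage of each backend; a backend whose using_count
             * reaches zero is put back on the global swapped pool. */
            mali_mem_swap_out_pages(job);
    }
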
diff --git a/utgard/r8p0/linux/mali_memory_swap_alloc.h b/utgard/r8p0/linux/mali_memory_swap_alloc.h
new file mode 100644 (file)
index 0000000..3624d71
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_SWAP_ALLOC_H__
+#define __MALI_MEMORY_SWAP_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include "mali_memory_types.h"
+#include "mali_pp_job.h"
+
+/**
+ * Initialize the memory swapping module.
+ */
+_mali_osk_errcode_t mali_mem_swap_init(void);
+
+void mali_mem_swap_term(void);
+
+/**
+ * Return the global shared memory file to other modules.
+ */
+struct file *mali_mem_swap_get_global_swap_file(void);
+
+/**
+ * Unlock the given memory backend so that its pages can be swapped out by the kernel.
+ */
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend);
+
+/**
+ * Remove the given memory backend from the global swap list.
+ */
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend);
+
+/**
+ * Add the given memory backend to the global swap list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend);
+
+/**
+ * Allocate one index from the bitmap; it is used as a page index in the global swap file.
+ */
+u32 mali_mem_swap_idx_alloc(void);
+
+void mali_mem_swap_idx_free(u32 idx);
+
+/**
+ * Allocate a new swap item without a page index.
+ */
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void);
+
+/**
+ * Free a swap item, truncate the corresponding space in the page cache and free the page index.
+ */
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item);
+
+/**
+ * Allocate a page node with a swap item.
+ */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void);
+
+/**
+ * Reduce the reference count of the given page node's swap item; when it drops to 0, the swap item is freed.
+ */
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page);
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page);
+
+/**
+ * Free a swappable memory backend.
+ */
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem);
+
+/**
+ * Unmap and free.
+ */
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);
+
+/**
+ * Read in a page from the global swap file at the pre-allocated page index.
+ */
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node);
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx);
+
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc);
+
+/**
+ * When a PP job is created, we need to swap in all memory backends needed by that job.
+ */
+int mali_mem_swap_in_pages(struct mali_pp_job *job);
+
+/**
+ * Put all memory backends used by this PP job on the global swap list.
+ */
+int mali_mem_swap_out_pages(struct mali_pp_job *job);
+
+/**
+ * Called from the page-fault handler to service CPU reads and writes.
+ */
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+/**
+ * Used to process COW on demand for a swappable memory backend.
+ */
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size);
+#endif
+#endif /* __MALI_MEMORY_SWAP_ALLOC_H__ */
+
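
As a rough sketch of the page-fault path this header anticipates, a CPU fault handler might service a read fault on swappable memory like the following; the handler shape and the vm_insert_page() step are assumptions, not the driver's actual fault code:

    static int example_cpu_fault(mali_mem_backend *mem_bkend,
                                 struct vm_area_struct *vma,
                                 unsigned long address, unsigned long pgoff)
    {
            struct page *page = NULL;

            if (_MALI_OSK_ERR_OK !=
                mali_mem_swap_allocate_page_on_demand(mem_bkend, pgoff, &page))
                    return -EFAULT;

            /* The page comes back locked; map it into the faulting VMA,
             * then release the page lock. */
            vm_insert_page(vma, address, page);
            unlock_page(page);
            return 0;
    }
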
diff --git a/utgard/r8p0/linux/mali_memory_types.h b/utgard/r8p0/linux/mali_memory_types.h
new file mode 100755 (executable)
index 0000000..60cd9f4
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_TYPES_H__
+#define __MALI_MEMORY_TYPES_H__
+
+#include <linux/mm.h>
+
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+typedef u32 mali_address_t;
+
+typedef enum mali_mem_type {
+       MALI_MEM_OS,
+       MALI_MEM_EXTERNAL,
+       MALI_MEM_SWAP,
+       MALI_MEM_DMA_BUF,
+       MALI_MEM_UMP,
+       MALI_MEM_BLOCK,
+       MALI_MEM_COW,
+       MALI_MEM_SECURE,
+       MALI_MEM_TYPE_MAX,
+} mali_mem_type;
+
+typedef struct mali_block_item {
+       /* For the block type, the physical address is always page-size aligned,
+        * so the low 12 bits are used for the reference count.
+        */
+       unsigned long phy_addr;
+} mali_block_item;
+
+/**
+ * idx is used to locate the given page in the address space of swap file.
+ * ref_count is used to mark how many memory backends are using this item.
+ */
+typedef struct mali_swap_item {
+       u32 idx;
+       atomic_t ref_count;
+       struct page *page;
+       dma_addr_t dma_addr;
+} mali_swap_item;
+
+typedef enum mali_page_node_type {
+       MALI_PAGE_NODE_OS,
+       MALI_PAGE_NODE_BLOCK,
+       MALI_PAGE_NODE_SWAP,
+} mali_page_node_type;
+
+typedef struct mali_page_node {
+       struct list_head list;
+       union {
+               struct page *page;
+               mali_block_item *blk_it; /*pointer to block item*/
+               mali_swap_item *swap_it;
+       };
+
+       u32 type;
+} mali_page_node;
+
+typedef struct mali_mem_os_mem {
+       struct list_head pages;
+       u32 count;
+} mali_mem_os_mem;
+
+typedef struct mali_mem_dma_buf {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+       struct mali_dma_buf_attachment *attachment;
+#endif
+} mali_mem_dma_buf;
+
+typedef struct mali_mem_external {
+       dma_addr_t phys;
+       u32 size;
+} mali_mem_external;
+
+typedef struct mali_mem_ump {
+#if defined(CONFIG_MALI400_UMP)
+       ump_dd_handle handle;
+#endif
+} mali_mem_ump;
+
+typedef struct block_allocator_allocation {
+       /* The list will be released in reverse order */
+       struct block_info *last_allocated;
+       u32 mapping_length;
+       struct block_allocator *info;
+} block_allocator_allocation;
+
+typedef struct mali_mem_block_mem {
+       struct list_head pfns;
+       u32 count;
+} mali_mem_block_mem;
+
+typedef struct mali_mem_virt_mali_mapping {
+       mali_address_t addr; /* Virtual Mali address */
+       u32 properties;      /* MMU Permissions + cache, must match MMU HW */
+} mali_mem_virt_mali_mapping;
+
+typedef struct mali_mem_virt_cpu_mapping {
+       void __user *addr;
+       struct vm_area_struct *vma;
+} mali_mem_virt_cpu_mapping;
+
+#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
+#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
+
+typedef struct mali_mm_node {
+       /* Mali GPU vaddr start; use u32 because the MMU only supports 32-bit addresses */
+       uint32_t start; /* GPU vaddr */
+       uint32_t size;  /* GPU allocation virtual size */
+       unsigned allocated : 1;
+} mali_mm_node;
+
+typedef struct mali_vma_node {
+       struct mali_mm_node vm_node;
+       struct rb_node vm_rb;
+} mali_vma_node;
+
+
+typedef struct mali_mem_allocation {
+       MALI_DEBUG_CODE(u32 magic);
+       mali_mem_type type;                /**< Type of memory */
+       u32 flags;                         /**< Flags for this allocation */
+
+       struct mali_session_data *session; /**< Pointer to session that owns the allocation */
+
+       mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+       mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+
+       /* add for new memory system */
+       struct mali_vma_node mali_vma_node;
+       u32 vsize; /* virtual size*/
+       u32 psize; /* physical backend memory size*/
+       struct list_head list;
+       s32 backend_handle; /* idr for mem_backend */
+       _mali_osk_atomic_t mem_alloc_refcount;
+} mali_mem_allocation;
+
+struct mali_mem_os_allocator {
+       spinlock_t pool_lock;
+       struct list_head pool_pages;
+       size_t pool_count;
+
+       atomic_t allocated_pages;
+       size_t allocation_limit;
+
+       struct shrinker shrinker;
+       struct delayed_work timed_shrinker;
+       struct workqueue_struct *wq;
+};
+
+/* COW backend memory type */
+typedef struct mali_mem_cow {
+       struct list_head pages;  /**< all pages for this COW backend allocation,
+                                     including newly allocated pages for the modified range */
+       u32 count;               /**< number of pages */
+       s32 change_pages_nr;
+} mali_mem_cow;
+
+typedef struct mali_mem_swap {
+       struct list_head pages;
+       u32 count;
+} mali_mem_swap;
+
+typedef struct mali_mem_secure {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+       struct dma_buf *buf;
+       struct dma_buf_attachment *attachment;
+       struct sg_table *sgt;
+#endif
+       u32 count;
+} mali_mem_secure;
+
+#define MALI_MEM_BACKEND_FLAG_COWED                   (0x1)  /* COW has happened on this backend */
+#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE        (0x2)  /* This is a COW backend, mapped so the CPU is not allowed to write */
+#define MALI_MEM_BACKEND_FLAG_SWAP_COWED              (0x4)  /* Marks that the given backend is COWed from swappable memory. */
+/* Marks that this backend is not swapped in by the Mali driver; before using it,
+ * we must swap it in and set up the corresponding page table entries. */
+#define MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN            (0x8)
+#define MALI_MEM_BACKEND_FLAG_NOT_BINDED              (0x1 << 5) /* This backend is not yet backed by physical memory; used for deferred binding */
+#define MALI_MEM_BACKEND_FLAG_BINDED                  (0x1 << 6) /* This backend is backed by physical memory; used for deferred binding */
+
+typedef struct mali_mem_backend {
+       mali_mem_type type;                /**< Type of backend memory */
+       u32 flags;                         /**< Flags for this allocation */
+       u32 size;
+       /* Union selected by type. */
+       union {
+               mali_mem_os_mem os_mem;       /**< MALI_MEM_OS */
+               mali_mem_external ext_mem;    /**< MALI_MEM_EXTERNAL */
+               mali_mem_dma_buf dma_buf;     /**< MALI_MEM_DMA_BUF */
+               mali_mem_ump ump_mem;         /**< MALI_MEM_UMP */
+               mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+               mali_mem_cow cow_mem;
+               mali_mem_swap swap_mem;
+               mali_mem_secure secure_mem;
+       };
+       mali_mem_allocation *mali_allocation;
+       struct mutex mutex;
+       mali_mem_type cow_type;
+
+       struct list_head list;           /**< Used to link swappable memory backend to the global swappable list */
+       int using_count;                 /**< Mark how many PP jobs are using this memory backend */
+       u32 start_idx;                   /**< If the corresponding VMA of this backend is linear, this value will be used to set vma->vm_pgoff */
+} mali_mem_backend;
+
+#define MALI_MEM_FLAG_MALI_GUARD_PAGE (_MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+#define MALI_MEM_FLAG_DONT_CPU_MAP    (1 << 1)
+#define MALI_MEM_FLAG_CAN_RESIZE  (_MALI_MEMORY_ALLOCATE_RESIZEABLE)
+#endif /* __MALI_MEMORY_TYPES_H__ */
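
The mali_block_item comment above relies on page-aligned physical addresses leaving the low 12 bits free. A standalone illustration of that packing trick, with masks assumed for 4 KiB pages (not driver code):

    #include <assert.h>

    #define BLOCK_REF_MASK 0xFFFUL  /* low 12 bits of a page-aligned address */

    static unsigned long block_pack(unsigned long phys, unsigned long ref)
    {
            assert((phys & BLOCK_REF_MASK) == 0);  /* must be page aligned */
            assert(ref <= BLOCK_REF_MASK);
            return phys | ref;  /* address and refcount share one word */
    }

    static unsigned long block_phys(unsigned long v) { return v & ~BLOCK_REF_MASK; }
    static unsigned long block_ref(unsigned long v)  { return v & BLOCK_REF_MASK; }

    int main(void)
    {
            unsigned long item = block_pack(0x10000000UL, 2);
            assert(block_phys(item) == 0x10000000UL && block_ref(item) == 2);
            return 0;
    }
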
diff --git a/utgard/r8p0/linux/mali_memory_ump.c b/utgard/r8p0/linux/mali_memory_ump.c
new file mode 100755 (executable)
index 0000000..72bef08
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory.h"
+#include "ump_kernel_interface.h"
+
+static int mali_mem_ump_map(mali_mem_backend *mem_backend)
+{
+       ump_dd_handle ump_mem;
+       mali_mem_allocation *alloc;
+       struct mali_session_data *session;
+       u32 nr_blocks;
+       u32 i;
+       ump_dd_physical_block *ump_blocks;
+       struct mali_page_directory *pagedir;
+       u32 offset = 0;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       ump_mem = mem_backend->ump_mem.handle;
+       MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+       nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+       if (nr_blocks == 0) {
+               MALI_DEBUG_PRINT(1, ("No block count\n"));
+               return -EINVAL;
+       }
+
+       ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks);
+       if (NULL == ump_blocks) {
+               return -ENOMEM;
+       }
+
+       if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) {
+               _mali_osk_free(ump_blocks);
+               return -EFAULT;
+       }
+
+       pagedir = session->page_directory;
+
+       mali_session_memory_lock(session);
+
+       err = mali_mem_mali_map_prepare(alloc);
+       if (_MALI_OSK_ERR_OK != err) {
+               MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
+
+               _mali_osk_free(ump_blocks);
+               mali_session_memory_unlock(session);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < nr_blocks; ++i) {
+               u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+               MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr, ump_blocks[i].size));
+
+               mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
+                                       ump_blocks[i].size, MALI_MMU_FLAGS_DEFAULT);
+
+               offset += ump_blocks[i].size;
+       }
+
+       if (alloc->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+               /* Map in an extra virtual guard page at the end of the VMA */
+               MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
+
+               mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+
+               offset += _MALI_OSK_MALI_PAGE_SIZE;
+       }
+       mali_session_memory_unlock(session);
+       _mali_osk_free(ump_blocks);
+       return 0;
+}
+
+static void mali_mem_ump_unmap(mali_mem_allocation *alloc)
+{
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32  secure_id, u32 flags)
+{
+       ump_dd_handle ump_mem;
+       int ret;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+       MALI_DEBUG_PRINT(3,
+                        ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+                         secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size));
+
+       ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
+       if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       }
+
+       mem_backend->ump_mem.handle = ump_mem;
+
+       ret = mali_mem_ump_map(mem_backend);
+       if (0 != ret) {
+               ump_dd_reference_release(ump_mem);
+               return _MALI_OSK_ERR_FAULT;
+       }
+       MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n"));
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend)
+{
+       ump_dd_handle ump_mem;
+       mali_mem_allocation *alloc;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+       ump_mem = mem_backend->ump_mem.handle;
+       MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       mali_mem_ump_unmap(alloc);
+       ump_dd_reference_release(ump_mem);
+}
+
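
A sketch of how bind and unbind are expected to pair up for an imported UMP buffer; the surrounding allocation setup is elided and the flow is illustrative only:

    static _mali_osk_errcode_t example_import_ump(mali_mem_allocation *alloc,
                                                  mali_mem_backend *backend,
                                                  u32 secure_id)
    {
            /* Resolves the secure id, GPU-maps the blocks, and keeps a UMP reference. */
            if (_MALI_OSK_ERR_OK != mali_mem_bind_ump_buf(alloc, backend, secure_id, 0))
                    return _MALI_OSK_ERR_FAULT;

            /* ... GPU work using the mapping ... */

            /* Unmaps from the GPU and drops the UMP reference taken at bind time. */
            mali_mem_unbind_ump_buf(backend);
            return _MALI_OSK_ERR_OK;
    }
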
diff --git a/utgard/r8p0/linux/mali_memory_ump.h b/utgard/r8p0/linux/mali_memory_ump.h
new file mode 100644 (file)
index 0000000..23f59ae
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_UMP_BUF_H__
+#define __MALI_MEMORY_UMP_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32  secure_id, u32 flags);
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_UMP_BUF_H__ */
diff --git a/utgard/r8p0/linux/mali_memory_util.c b/utgard/r8p0/linux/mali_memory_util.c
new file mode 100755 (executable)
index 0000000..5114388
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_secure.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_external.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+
+
+/**
+ * _mali_free_allocation_mem - free a memory allocation
+ */
+static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc)
+{
+       mali_mem_backend *mem_bkend = NULL;
+       u32 free_pages_nr = 0;
+
+       struct mali_session_data *session = mali_alloc->session;
+       MALI_DEBUG_PRINT(4, (" _mali_free_allocation_mem, psize =0x%x! \n", mali_alloc->psize));
+       if (0 == mali_alloc->psize)
+               goto out;
+
+       /* Get backend memory & Map on CPU */
+       mutex_lock(&mali_idr_mutex);
+       mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+       switch (mem_bkend->type) {
+       case MALI_MEM_OS:
+               free_pages_nr = mali_mem_os_release(mem_bkend);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+       case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+               mali_mem_unbind_ump_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+               MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+#endif
+               break;
+       case MALI_MEM_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               mali_mem_unbind_dma_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+               MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+#endif
+               break;
+       case MALI_MEM_EXTERNAL:
+               mali_mem_unbind_ext_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+               break;
+
+       case MALI_MEM_BLOCK:
+               free_pages_nr = mali_mem_block_release(mem_bkend);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+
+       case MALI_MEM_COW:
+               if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+                       free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+               } else {
+                       free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE);
+               }
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+       case MALI_MEM_SWAP:
+               free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               atomic_sub(free_pages_nr, &session->mali_mem_array[mem_bkend->type]);
+               break;
+       case MALI_MEM_SECURE:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               free_pages_nr = mali_mem_secure_release(mem_bkend);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+#else
+               MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n"));
+#endif
+               break;
+       default:
+               MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type));
+               break;
+       }
+
+       if ((NULL != mali_alloc->cpu_mapping.vma) && (mali_alloc == (mali_alloc->cpu_mapping.vma)->vm_private_data))
+               (mali_alloc->cpu_mapping.vma)->vm_private_data = NULL;
+
+       /* Remove backend memory index */
+       mutex_lock(&mali_idr_mutex);
+       idr_remove(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       kfree(mem_bkend);
+out:
+       /* Remove the memory allocation. */
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_alloc);
+       return free_pages_nr;
+}
+
+/**
+ * Reference counting for allocations: drop one reference and free the
+ * allocation when the count reaches zero.
+ */
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc)
+{
+       u32 free_pages_nr = 0;
+       mali_mem_allocation *mali_alloc = *alloc;
+       *alloc = NULL;
+       if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) {
+               free_pages_nr = _mali_free_allocation_mem(mali_alloc);
+       }
+       return free_pages_nr;
+}
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc)
+{
+       _mali_osk_atomic_inc(&alloc->mem_alloc_refcount);
+}
+
+void mali_free_session_allocations(struct mali_session_data *session)
+{
+       struct mali_mem_allocation *entry, *next;
+
+       MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n"));
+
+       list_for_each_entry_safe(entry, next, &session->allocation_mgr.head, list) {
+               mali_allocation_unref(&entry);
+       }
+}
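
The ref/unref pair above follows the usual last-reference-frees pattern. A brief sketch of the contract, with the sharing scenario assumed for illustration:

    static void example_share_allocation(struct mali_mem_allocation *alloc)
    {
            struct mali_mem_allocation *local = alloc;

            mali_allocation_ref(local);  /* second holder takes a reference */

            /* ... both holders may use the allocation ... */

            /* Drops one reference and NULLs the caller's pointer; on the last
             * drop the allocation is torn down and the freed page count returned. */
            mali_allocation_unref(&local);
    }
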
diff --git a/utgard/r8p0/linux/mali_memory_util.h b/utgard/r8p0/linux/mali_memory_util.h
new file mode 100644 (file)
index 0000000..0a23b77
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+
+#ifndef __MALI_MEMORY_UTIL_H__
+#define __MALI_MEMORY_UTIL_H__
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc);
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc);
+
+void mali_free_session_allocations(struct mali_session_data *session);
+
+#endif
diff --git a/utgard/r8p0/linux/mali_memory_virtual.c b/utgard/r8p0/linux/mali_memory_virtual.c
new file mode 100644 (file)
index 0000000..a217e43
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+
+
+/**
+ * Internal helper to link a node into the rb-tree.
+ */
+static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr,
+               struct mali_vma_node *node)
+{
+       struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct mali_vma_node *iter_node;
+
+       while (likely(*iter)) {
+               parent = *iter;
+               iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb);
+
+               if (node->vm_node.start < iter_node->vm_node.start)
+                       iter = &(*iter)->rb_left;
+               else if (node->vm_node.start > iter_node->vm_node.start)
+                       iter = &(*iter)->rb_right;
+               else
+                       MALI_DEBUG_ASSERT(0);
+       }
+
+       rb_link_node(&node->vm_rb, parent, iter);
+       rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb);
+}
+
+/**
+ * mali_vma_offset_add() - Add offset node to RB Tree
+ */
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+                       struct mali_vma_node *node)
+{
+       int ret = 0;
+       write_lock(&mgr->vm_lock);
+
+       if (node->vm_node.allocated) {
+               goto out;
+       }
+
+       _mali_vma_offset_add_rb(mgr, node);
+       /* set to allocated */
+       node->vm_node.allocated = 1;
+
+out:
+       write_unlock(&mgr->vm_lock);
+       return ret;
+}
+
+/**
+ * mali_vma_offset_remove() - Remove offset node from RB tree
+ */
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+                           struct mali_vma_node *node)
+{
+       write_lock(&mgr->vm_lock);
+
+       if (node->vm_node.allocated) {
+               rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb);
+               memset(&node->vm_node, 0, sizeof(node->vm_node));
+       }
+       write_unlock(&mgr->vm_lock);
+}
+
+/**
+ * mali_vma_offset_search - Search for a node in the RB tree.
+ */
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+               unsigned long start, unsigned long pages)
+{
+       struct mali_vma_node *node, *best;
+       struct rb_node *iter;
+       unsigned long offset;
+       read_lock(&mgr->vm_lock);
+
+       iter = mgr->allocation_mgr_rb.rb_node;
+       best = NULL;
+
+       while (likely(iter)) {
+               node = rb_entry(iter, struct mali_vma_node, vm_rb);
+               offset = node->vm_node.start;
+               if (start >= offset) {
+                       iter = iter->rb_right;
+                       best = node;
+                       if (start == offset)
+                               break;
+               } else {
+                       iter = iter->rb_left;
+               }
+       }
+
+       if (best) {
+               offset = best->vm_node.start + best->vm_node.size;
+               if (offset <= start + pages)
+                       best = NULL;
+       }
+       read_unlock(&mgr->vm_lock);
+
+       return best;
+}
+
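
A sketch of the add/search/remove cycle this manager supports; the start and size values are invented for illustration. Note that a search with pages == 0 returns the node whose range begins at or before the given start:

    static void example_vma_tracking(struct mali_allocation_manager *mgr,
                                     struct mali_vma_node *node)
    {
            node->vm_node.start = 0x10000000;  /* assumed GPU virtual address */
            node->vm_node.size  = 0x1000;

            mali_vma_offset_add(mgr, node);

            /* An exact-start lookup finds the node we just inserted. */
            if (node == mali_vma_offset_search(mgr, node->vm_node.start, 0)) {
                    /* found: safe to use the containing allocation */
            }

            mali_vma_offset_remove(mgr, node);
    }
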
diff --git a/utgard/r8p0/linux/mali_memory_virtual.h b/utgard/r8p0/linux/mali_memory_virtual.h
new file mode 100644 (file)
index 0000000..2a70cbb
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __MALI_GPU_VMEM_H__
+#define __MALI_GPU_VMEM_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+
+
+
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+                       struct mali_vma_node *node);
+
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+                           struct mali_vma_node *node);
+
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+               unsigned long start,    unsigned long pages);
+
+#endif
diff --git a/utgard/r8p0/linux/mali_osk_atomics.c b/utgard/r8p0/linux/mali_osk_atomics.c
new file mode 100755 (executable)
index 0000000..03f4421
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom)
+{
+       atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
+{
+       return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom)
+{
+       atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom)
+{
+       return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+{
+       MALI_DEBUG_ASSERT_POINTER(atom);
+       atomic_set((atomic_t *)&atom->u.val, val);
+}
+
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
+{
+       return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
+{
+       MALI_IGNORE(atom);
+}
+
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val)
+{
+       return atomic_xchg((atomic_t *)&atom->u.val, val);
+}
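
For completeness, a short sketch of the wrapper API in use, mirroring the mem_alloc_refcount pattern seen earlier in this patch:

    static void example_atomic_usage(void)
    {
            _mali_osk_atomic_t refcount;

            _mali_osk_atomic_init(&refcount, 1);   /* creator holds one reference */
            _mali_osk_atomic_inc(&refcount);       /* a second holder appears */

            if (0 == _mali_osk_atomic_dec_return(&refcount)) {
                    /* not reached: one reference remains */
            }

            _mali_osk_atomic_term(&refcount);      /* no-op on Linux, kept for symmetry */
    }
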
diff --git a/utgard/r8p0/linux/mali_osk_bitmap.c b/utgard/r8p0/linux/mali_osk_bitmap.c
new file mode 100644 (file)
index 0000000..67cc7e4
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010, 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitmap.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
+#include "common/mali_kernel_common.h"
+#include "mali_osk_types.h"
+#include "mali_osk.h"
+
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap)
+{
+       u32 obj;
+
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       _mali_osk_spinlock_lock(bitmap->lock);
+
+       obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->reserve);
+
+       if (obj < bitmap->max) {
+               set_bit(obj, bitmap->table);
+       } else {
+               obj = -1;
+       }
+
+       if (obj != -1)
+               --bitmap->avail;
+       _mali_osk_spinlock_unlock(bitmap->lock);
+
+       return obj;
+}
+
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj)
+{
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       _mali_osk_bitmap_free_range(bitmap, obj, 1);
+}
+
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt)
+{
+       u32 obj;
+
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       if (0 >= cnt) {
+               return -1;
+       }
+
+       if (1 == cnt) {
+               return _mali_osk_bitmap_alloc(bitmap);
+       }
+
+       _mali_osk_spinlock_lock(bitmap->lock);
+       obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+                                        bitmap->last, cnt, 0);
+
+       if (obj >= bitmap->max) {
+               obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+                                                bitmap->reserve, cnt, 0);
+       }
+
+       if (obj < bitmap->max) {
+               bitmap_set(bitmap->table, obj, cnt);
+
+               bitmap->last = (obj + cnt);
+               if (bitmap->last >= bitmap->max) {
+                       bitmap->last = bitmap->reserve;
+               }
+       } else {
+               obj = -1;
+       }
+
+       if (obj != -1) {
+               bitmap->avail -= cnt;
+       }
+
+       _mali_osk_spinlock_unlock(bitmap->lock);
+
+       return obj;
+}
+
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap)
+{
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       return bitmap->avail;
+}
+
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt)
+{
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       _mali_osk_spinlock_lock(bitmap->lock);
+       bitmap_clear(bitmap->table, obj, cnt);
+       bitmap->last = min(bitmap->last, obj);
+
+       bitmap->avail += cnt;
+       _mali_osk_spinlock_unlock(bitmap->lock);
+}
+
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve)
+{
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+       MALI_DEBUG_ASSERT(reserve <= num);
+
+       bitmap->reserve = reserve;
+       bitmap->last = reserve;
+       bitmap->max  = num;
+       bitmap->avail = num - reserve;
+       bitmap->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+       if (!bitmap->lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+                               sizeof(long), GFP_KERNEL);
+       if (!bitmap->table) {
+               _mali_osk_spinlock_term(bitmap->lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap)
+{
+       MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+       if (NULL != bitmap->lock) {
+               _mali_osk_spinlock_term(bitmap->lock);
+       }
+
+       if (NULL != bitmap->table) {
+               kfree(bitmap->table);
+       }
+}
+
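+/* Illustrative usage sketch (an assumption for documentation, not driver
+ * code): a 64-entry ID pool whose first 8 IDs are reserved.
+ *
+ *     struct _mali_osk_bitmap pool;
+ *
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_bitmap_init(&pool, 64, 8)) {
+ *             u32 id  = _mali_osk_bitmap_alloc(&pool);          // first free id >= 8
+ *             u32 run = _mali_osk_bitmap_alloc_range(&pool, 4); // 4 consecutive ids
+ *
+ *             _mali_osk_bitmap_free_range(&pool, run, 4);
+ *             _mali_osk_bitmap_free(&pool, id);
+ *             _mali_osk_bitmap_term(&pool);
+ *     }
+ */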
diff --git a/utgard/r8p0/linux/mali_osk_irq.c b/utgard/r8p0/linux/mali_osk_irq.c
new file mode 100644 (file)
index 0000000..9dc45de
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/types.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 29))
+#include <mach/cpu.h>
+#endif
+#include <linux/slab.h>        /* For memory allocation */
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+typedef struct _mali_osk_irq_t_struct {
+       u32 irqnum;
+       void *data;
+       _mali_osk_irq_uhandler_t uhandler;
+} mali_osk_irq_object_t;
+
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id);   /* , struct pt_regs *regs*/
+
+#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
+u32 get_irqnum(struct _mali_osk_irq_t_struct* irq)
+{
+       if (irq)
+               return irq->irqnum;
+       else
+               return 0;
+}
+#endif
+
+#if defined(DEBUG)
+
+struct test_interrupt_data {
+       _mali_osk_irq_ack_t ack_func;
+       void *probe_data;
+       mali_bool interrupt_received;
+       wait_queue_head_t wq;
+};
+
+static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
+{
+       irqreturn_t ret = IRQ_NONE;
+       struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id;
+
+       if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) {
+               data->interrupt_received = MALI_TRUE;
+               wake_up(&data->wq);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+static _mali_osk_errcode_t test_interrupt(u32 irqnum,
+               _mali_osk_irq_trigger_t trigger_func,
+               _mali_osk_irq_ack_t ack_func,
+               void *probe_data,
+               const char *description)
+{
+       unsigned long irq_flags = 0;
+       struct test_interrupt_data data = {
+               .ack_func = ack_func,
+               .probe_data = probe_data,
+               .interrupt_received = MALI_FALSE,
+       };
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+       /* Initialise the wait queue before requesting the IRQ: with shared
+        * interrupts the handler may run as soon as request_irq() returns. */
+       init_waitqueue_head(&data.wq);
+
+       if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) {
+               MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       trigger_func(probe_data);
+       wait_event_timeout(data.wq, data.interrupt_received, _mali_osk_time_mstoticks(1000));
+
+       free_irq(irqnum, &data);
+
+       if (data.interrupt_received) {
+               MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description));
+               return _MALI_OSK_ERR_OK;
+       } else {
+               MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum));
+               return _MALI_OSK_ERR_FAULT;
+       }
+}
+
+#endif /* defined(DEBUG) */
+
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description)
+{
+       mali_osk_irq_object_t *irq_object;
+       unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+       irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+       if (NULL == irq_object) {
+               return NULL;
+       }
+
+       if (-1 == irqnum) {
+               /* Probe for IRQ */
+               if ((NULL != trigger_func) && (NULL != ack_func)) {
+                       unsigned long probe_count = 3;
+                       _mali_osk_errcode_t err;
+                       int irq;
+
+                       MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+                       do {
+                               unsigned long mask;
+
+                               mask = probe_irq_on();
+                               trigger_func(probe_data);
+
+                               _mali_osk_time_ubusydelay(5);
+
+                               irq = probe_irq_off(mask);
+                               err = ack_func(probe_data);
+                       } while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+                       if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+                       else irqnum = irq;
+               } else irqnum = -1; /* no probe functions, fault */
+
+               if (-1 != irqnum) {
+                       /* found an irq */
+                       MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+               }
+       }
+
+       irq_object->irqnum = irqnum;
+       irq_object->uhandler = uhandler;
+       irq_object->data = int_data;
+
+       if (-1 == irqnum) {
+               MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+               kfree(irq_object);
+               return NULL;
+       }
+
+#if defined(DEBUG)
+       /* Verify that the configured interrupt settings are working */
+       if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+               MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description));
+               kfree(irq_object);
+               return NULL;
+       }
+#endif
+
+       if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+               MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+               kfree(irq_object);
+               return NULL;
+       }
+
+       return irq_object;
+}
+
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
+{
+       mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+       free_irq(irq_object->irqnum, irq_object);
+       kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context by the OS just after
+ * the CPU receives the hardware IRQ from Mali, or from other devices on the
+ * same IRQ channel. One instance of this function is registered for each Mali
+ * core, so when an interrupt arrives it is called once per registered core.
+ * That means each call checks exactly one Mali core, identified by the
+ * \a dev_id argument.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule mali_core_irq_handler_bottom_half to run as a high-priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id)   /* , struct pt_regs *regs*/
+{
+       irqreturn_t ret = IRQ_NONE;
+       mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+       if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
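+
+/* Illustrative sketch (an assumption for documentation, not driver code) of
+ * the shape of an upper-half handler registered via _mali_osk_irq_init();
+ * the my_core_* helper names are hypothetical:
+ *
+ *     static _mali_osk_errcode_t my_core_uhandler(void *data)
+ *     {
+ *             struct my_core *core = data;
+ *
+ *             if (!my_core_irq_is_pending(core))
+ *                     return _MALI_OSK_ERR_FAULT;   // not ours: IRQ_NONE
+ *
+ *             my_core_mask_irqs(core);              // mask until bottom half runs
+ *             my_core_schedule_bottom_half(core);
+ *             return _MALI_OSK_ERR_OK;              // handled: IRQ_HANDLED
+ *     }
+ */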
diff --git a/utgard/r8p0/linux/mali_osk_locks.c b/utgard/r8p0/linux/mali_osk_locks.c
new file mode 100755 (executable)
index 0000000..4fa9398
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+       checker->orig_flags = flags;
+       checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+       checker->order = order;
+       checker->next = NULL;
+#endif
+}
+
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+       checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+       if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+               if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+                       printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+                              _mali_osk_get_tid(), checker);
+                       dump_stack();
+               }
+       }
+#endif
+}
+
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+       if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+               remove_lock_from_log(checker, _mali_osk_get_tid());
+       }
+#endif
+       checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To ensure that the lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following functions will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that any lock taken has a higher order than the current highest-order lock the
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, `add_lock_to_log_and_check' makes sure that taking
+ *   the given lock is legal. It follows the linked list to find the last
+ *   lock taken by this thread. If that lock's order is lower than the
+ *   lock about to be taken, it appends the new lock to the list and returns
+ *   true; if not, it returns false. This return value is assert()'ed on in
+ *   _mali_osk_lock_wait().
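+ *
+ * Worked example (illustrative, assuming the enum values follow the same
+ * order as the switch in lock_order_to_string() below): a thread holding a
+ * lock of order _MALI_OSK_LOCK_ORDER_SCHEDULER may then take one of order
+ * _MALI_OSK_LOCK_ORDER_L2, which is higher, but a later attempt to take
+ * _MALI_OSK_LOCK_ORDER_MEM_SESSION, which is lower, makes
+ * add_lock_to_log_and_check() return MALI_FALSE, and the caller prints the
+ * offending pair and dumps the tracking list.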
+ */
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+static void dump_lock_tracking_list(void)
+{
+       struct _mali_osk_lock_debug_s *l;
+       u32 n = 1;
+
+       /* print list for debugging purposes */
+       l = lock_lookup_list;
+
+       while (NULL != l) {
+               printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+               l = l->next;
+               MALI_DEBUG_ASSERT(n++ < 100);
+       }
+       printk(" NULL\n");
+}
+
+static int tracking_list_length(void)
+{
+       struct _mali_osk_lock_debug_s *l;
+       u32 n = 0;
+       l = lock_lookup_list;
+
+       while (NULL != l) {
+               l = l->next;
+               n++;
+               MALI_DEBUG_ASSERT(n < 100);
+       }
+       return n;
+}
+
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+       mali_bool ret = MALI_FALSE;
+       _mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+       struct _mali_osk_lock_debug_s *highest_order_lock = (struct _mali_osk_lock_debug_s *)0xbeefbabe;
+       struct _mali_osk_lock_debug_s *l;
+       unsigned long local_lock_flag;
+       u32 len;
+
+       spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+       len = tracking_list_length();
+
+       l  = lock_lookup_list;
+       if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+               lock_lookup_list = lock;
+               spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+               return MALI_TRUE;
+       } else {
+               /* Traverse the locks taken and find the lock of the highest order.
+                * Since several threads may hold locks, each lock's owner must be
+                * checked so that locks not owned by this thread can be ignored. */
+               for (;;) {
+                       MALI_DEBUG_ASSERT_POINTER(l);
+                       if (tid == l->owner && l->order >= highest_order_for_tid) {
+                               highest_order_for_tid = l->order;
+                               highest_order_lock = l;
+                       }
+
+                       if (NULL != l->next) {
+                               l = l->next;
+                       } else {
+                               break;
+                       }
+               }
+
+               l->next = lock;
+               lock->next = NULL;
+       }
+
+       /* We have now found the highest order lock currently held by this thread and can see if it is
+        * legal to take the requested lock. */
+       ret = highest_order_for_tid < lock->order;
+
+       if (!ret) {
+               printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+                      lock->order, lock_order_to_string(lock->order),
+                      highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
+               dump_lock_tracking_list();
+       }
+
+       if (len + 1 != tracking_list_length()) {
+               printk(KERN_ERR "************ lock: %p\n", lock);
+               printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+               dump_lock_tracking_list();
+               MALI_DEBUG_ASSERT_POINTER(NULL);
+       }
+
+       spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+       return ret;
+}
+
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+       struct _mali_osk_lock_debug_s *curr;
+       struct _mali_osk_lock_debug_s *prev = NULL;
+       unsigned long local_lock_flag;
+       u32 len;
+       u32 n = 0;
+
+       spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+       len = tracking_list_length();
+       curr = lock_lookup_list;
+
+       if (NULL == curr) {
+               printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+               dump_lock_tracking_list();
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(curr);
+
+
+       while (lock != curr) {
+               prev = curr;
+
+               MALI_DEBUG_ASSERT_POINTER(curr);
+               curr = curr->next;
+               MALI_DEBUG_ASSERT(n++ < 100);
+       }
+
+       if (NULL == prev) {
+               lock_lookup_list = curr->next;
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(curr);
+               MALI_DEBUG_ASSERT_POINTER(prev);
+               prev->next = curr->next;
+       }
+
+       lock->next = NULL;
+
+       if (len - 1 != tracking_list_length()) {
+               printk(KERN_ERR "************ lock: %p\n", lock);
+               printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+               dump_lock_tracking_list();
+               MALI_DEBUG_ASSERT_POINTER(NULL);
+       }
+
+       spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+}
+
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order)
+{
+       switch (order) {
+       case _MALI_OSK_LOCK_ORDER_SESSIONS:
+               return "_MALI_OSK_LOCK_ORDER_SESSIONS";
+               break;
+       case _MALI_OSK_LOCK_ORDER_MEM_SESSION:
+               return "_MALI_OSK_LOCK_ORDER_MEM_SESSION";
+               break;
+       case _MALI_OSK_LOCK_ORDER_MEM_INFO:
+               return "_MALI_OSK_LOCK_ORDER_MEM_INFO";
+               break;
+       case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE:
+               return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE";
+               break;
+       case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
+               return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
+               break;
+       case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+               return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
+               break;
+       case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+               return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+               break;
+       case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+               return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
+               break;
+       case _MALI_OSK_LOCK_ORDER_SCHEDULER:
+               return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
+               break;
+       case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+               return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
+               break;
+       case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+               return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
+               break;
+       case _MALI_OSK_LOCK_ORDER_PROFILING:
+               return "_MALI_OSK_LOCK_ORDER_PROFILING";
+               break;
+       case _MALI_OSK_LOCK_ORDER_L2:
+               return "_MALI_OSK_LOCK_ORDER_L2";
+               break;
+       case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+               return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+               break;
+       case _MALI_OSK_LOCK_ORDER_UTILIZATION:
+               return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
+               break;
+       case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
+               return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
+               break;
+       case _MALI_OSK_LOCK_ORDER_PM_STATE:
+               return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+               break;
+       default:
+               return "<UNKNOWN_LOCK_ORDER>";
+       }
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
diff --git a/utgard/r8p0/linux/mali_osk_locks.h b/utgard/r8p0/linux/mali_osk_locks.h
new file mode 100755 (executable)
index 0000000..a05586f
--- /dev/null
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+       /* When DEBUG is enabled, this struct is used to track each lock's owner, mode and order for checking */
+#ifdef DEBUG
+       struct _mali_osk_lock_debug_s {
+               u32 owner;
+               _mali_osk_lock_flags_t orig_flags;
+               _mali_osk_lock_order_t order;
+               struct _mali_osk_lock_debug_s *next;
+       };
+#endif
+
+       /* Abstraction of spinlock_t */
+       struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+               spinlock_t spinlock;
+       };
+
+       /* Abstraction of spinlock_t plus the flags word used to save the interrupt state while the lock is held */
+       struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+
+               spinlock_t spinlock;
+               unsigned long flags;
+       };
+
+       /* Abstraction of rw_semaphore in OS */
+       struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+               _mali_osk_lock_mode_t mode;
+#endif
+
+               struct rw_semaphore rw_sema;
+       };
+
+       /* Mutex and mutex_interruptible functions share the same osk mutex struct */
+       struct _mali_osk_mutex_s {
+#ifdef DEBUG
+               struct _mali_osk_lock_debug_s checker;
+#endif
+               struct mutex mutex;
+       };
+
+#ifdef DEBUG
+       /** @brief The _mali_osk_locks_debug_init/add/remove() functions are declared when DEBUG is enabled
+        * and defined in mali_osk_locks.c. When LOCK_ORDER_CHECKING is also enabled, calling them on every
+        * init/lock/unlock of a lock/mutex lets us track the lock order for a given tid. */
+       void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+       void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+       void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+       /** @brief Return a given lock's owner; only available when DEBUG is enabled. */
+       static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
+       {
+               return lock->owner;
+       }
+#else
+#define _mali_osk_locks_debug_init(x, y, z) do {} while (0)
+#define _mali_osk_locks_debug_add(x) do {} while (0)
+#define _mali_osk_locks_debug_remove(x) do {} while (0)
+#endif
+
+       /** @brief Before using _mali_osk_spinlock_lock(), this init function must be called to allocate and initialise the spinlock. */
+       static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_spinlock_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
+               if (NULL == lock) {
+                       return NULL;
+               }
+               spin_lock_init(&lock->spinlock);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief Lock a spinlock */
+       static inline void  _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               spin_lock(&lock->spinlock);
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock a spinlock */
+       static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               spin_unlock(&lock->spinlock);
+       }
+
+       /** @brief Free the memory block that the argument lock points to; its type must be
+        * _mali_osk_spinlock_t *. */
+       static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
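+
+       /* Illustrative usage sketch (an assumption for documentation, not
+        * driver code), mirroring how _mali_osk_bitmap_init() uses this API:
+        *
+        *     _mali_osk_spinlock_t *l;
+        *
+        *     l = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED,
+        *                                 _MALI_OSK_LOCK_ORDER_FIRST);
+        *     if (NULL != l) {
+        *             _mali_osk_spinlock_lock(l);
+        *             ...critical section...
+        *             _mali_osk_spinlock_unlock(l);
+        *             _mali_osk_spinlock_term(l);
+        *     }
+        */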
+
+       /** @brief Before _mali_osk_spinlock_irq_lock/unlock/term() is called, this init function must
+        * be called to initialise the spinlock and flags in struct _mali_osk_spinlock_irq_t. */
+       static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_spinlock_irq_t *lock = NULL;
+               lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+
+               lock->flags = 0;
+               spin_lock_init(&lock->spinlock);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief Lock the spinlock, saving the CPU's interrupt state */
+       static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock)
+       {
+               unsigned long tmp_flags;
+
+               BUG_ON(NULL == lock);
+               spin_lock_irqsave(&lock->spinlock, tmp_flags);
+               lock->flags = tmp_flags;
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock the spinlock, restoring the saved interrupt state */
+       static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+       }
+
+       /** @brief Destroy the memory block that lock points to; its type must be
+        * _mali_osk_spinlock_irq_t *. */
+       static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+       /** @brief Before _mali_osk_mutex_rw_wait/signal/term() is called, _mali_osk_mutex_rw_init()
+        * must be called to kmalloc the lock and initialise its members. */
+       static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_mutex_rw_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+
+               init_rwsem(&lock->rw_sema);
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief When calling _mali_osk_mutex_rw_wait/signal(), the second argument mode
+        * must be either _MALI_OSK_LOCKMODE_RO or _MALI_OSK_LOCKMODE_RW */
+       static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+       {
+               BUG_ON(NULL == lock);
+               BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+               if (mode == _MALI_OSK_LOCKMODE_RO) {
+                       down_read(&lock->rw_sema);
+               } else {
+                       down_write(&lock->rw_sema);
+               }
+
+#ifdef DEBUG
+               /* Both modes store the same value; record it for the release-side check. */
+               lock->mode = mode;
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+       }
+
+       /** @brief Release lock->rw_sema with up_read()/up_write() according to the mode argument. */
+       static inline void  _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+       {
+               BUG_ON(NULL == lock);
+               BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+               /* make sure the thread releasing the lock actually was the owner */
+               if (mode == _MALI_OSK_LOCKMODE_RW) {
+                       _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+                       /* This lock now has no owner */
+                       lock->checker.owner = 0;
+               }
+#endif
+
+               if (mode == _MALI_OSK_LOCKMODE_RO) {
+                       up_read(&lock->rw_sema);
+               } else {
+                       up_write(&lock->rw_sema);
+               }
+       }
+
+       /** @brief Free the memory block that lock points to; its type must be
+        * _mali_osk_mutex_rw_t *. */
+       static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
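+
+       /* Illustrative usage sketch (an assumption for documentation, not
+        * driver code): readers take the semaphore in RO mode, so several may
+        * hold it concurrently; a writer takes it in RW mode for exclusivity:
+        *
+        *     _mali_osk_mutex_rw_wait(lock, _MALI_OSK_LOCKMODE_RO);
+        *     ...read shared state...
+        *     _mali_osk_mutex_rw_signal(lock, _MALI_OSK_LOCKMODE_RO);
+        */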
+
+       /** @brief Mutex and mutex_interruptible share the same init and term functions because they use
+        * the same osk mutex struct; they differ only in which locking function is used */
+       static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+       {
+               _mali_osk_mutex_t *lock = NULL;
+
+               lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+               if (NULL == lock) {
+                       return NULL;
+               }
+               mutex_init(&lock->mutex);
+
+               _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+               return lock;
+       }
+
+       /** @brief  Lock the lock->mutex with mutex_lock_interruptible function */
+       static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+       {
+               _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+               BUG_ON(NULL == lock);
+
+               if (mutex_lock_interruptible(&lock->mutex)) {
+                       printk(KERN_WARNING "Mali: Can not lock mutex\n");
+                       err = _MALI_OSK_ERR_RESTARTSYSCALL;
+               }
+
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+               return err;
+       }
+
+       /** @brief Unlock the lock->mutex which is locked with mutex_lock_interruptible() function. */
+       static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               mutex_unlock(&lock->mutex);
+       }
+
+       /** @brief Lock the lock->mutex with the plain mutex_lock() function, which cannot be interrupted. */
+       static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               mutex_lock(&lock->mutex);
+               _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+       }
+
+       /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */
+       static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock)
+       {
+               BUG_ON(NULL == lock);
+               _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+               mutex_unlock(&lock->mutex);
+       }
+
+       /** @brief Free the memory block that lock points to. */
+       static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock)
+       {
+               /* Parameter validation  */
+               BUG_ON(NULL == lock);
+
+               /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+               kfree(lock);
+       }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/utgard/r8p0/linux/mali_osk_low_level_mem.c b/utgard/r8p0/linux/mali_osk_low_level_mem.c
new file mode 100755 (executable)
index 0000000..bc713a1
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+void _mali_osk_mem_barrier(void)
+{
+       mb();
+}
+
+void _mali_osk_write_mem_barrier(void)
+{
+       wmb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
+{
+       return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
+{
+       iounmap((void *)virt);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
+{
+#if MALI_LICENSE_IS_GPL
+       return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
+#else
+       return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
+{
+#if !MALI_LICENSE_IS_GPL
+       release_mem_region(phys, size);
+#endif
+}
+
+void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val)
+{
+       __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset);
+}
+
+u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
+{
+       return ioread32(((u8 *)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val)
+{
+       iowrite32(val, ((u8 *)addr) + offset);
+}
+
+void _mali_osk_cache_flushall(void)
+{
+       /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size)
+{
+       _mali_osk_write_mem_barrier();
+}
+
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+       u32 retval = 0;
+       void *temp_buf;
+
+       temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+       if (NULL != temp_buf) {
+               u32 bytes_left_to_copy = size;
+               u32 i;
+               for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+                       u32 size_to_copy;
+                       u32 size_copied;
+                       u32 bytes_left;
+
+                       if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+                               size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+                       } else {
+                               size_to_copy = bytes_left_to_copy;
+                       }
+
+                       bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy);
+                       size_copied = size_to_copy - bytes_left;
+
+                       bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied);
+                       size_copied -= bytes_left;
+
+                       bytes_left_to_copy -= size_copied;
+                       retval += size_copied;
+
+                       if (size_copied != size_to_copy) {
+                               break; /* Early out, we were not able to copy this entire block */
+                       }
+               }
+
+               kfree(temp_buf);
+       }
+
+       return retval;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+       void __user *src;
+       void __user *dst;
+       struct mali_session_data *session;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (NULL == session) {
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       src = (void __user *)(uintptr_t)args->src;
+       dst = (void __user *)(uintptr_t)args->dest;
+
+       /* Return number of bytes actually copied */
+       args->size = _mali_osk_mem_write_safe(dst, src, args->size);
+       return _MALI_OSK_ERR_OK;
+}
diff --git a/utgard/r8p0/linux/mali_osk_mali.c b/utgard/r8p0/linux/mali_osk_mali.c
new file mode 100755 (executable)
index 0000000..f47b343
--- /dev/null
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h"           /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+static mali_bool mali_secure_mode_enabled = MALI_FALSE;
+static mali_bool mali_secure_mode_supported = MALI_FALSE;
+
+/* Function that deinitializes the mali gpu secure mode */
+void (*mali_secure_mode_deinit)(void) = NULL;
+/* Function that resets the GPU and enables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_enable)(void) = NULL;
+/* Function that resets the GPU and disables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_disable)(void) = NULL;
+
+#ifdef CONFIG_MALI_DT
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the maximum number of resources we can have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the number of resources with interrupts; they are
+ * the first 20 elements of the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * pp core start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_l2_LOCATION_END 22
+
+/**
+ * DMA unit location.
+ */
+#define MALI_OSK_RESOURCE_DMA_LOCATION 26
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+       {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+       {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+       {.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+       {.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+       {.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+       {.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+       {.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+       {.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+       {.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+       {.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+       {.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+       {.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+       {.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+       {.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+       {.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+       {.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+       {.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+       {.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU7",},
+       {.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+       {.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+       {.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+       {.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+       {.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+       {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+static int _mali_osk_get_compatible_name(const char **out_string)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       return of_property_read_string(node, "compatible", out_string);
+}
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+       mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE;
+       int i, pp_core_num = 0, l2_core_num = 0;
+       struct resource *res;
+       const char *compatible_name = NULL;
+
+       if (0 == _mali_osk_get_compatible_name(&compatible_name)) {
+               if (0 == strncmp(compatible_name, "arm,mali-450", strlen("arm,mali-450"))) {
+                       mali_is_450 = MALI_TRUE;
+                       MALI_DEBUG_PRINT(2, ("mali-450 device tree detected."));
+               } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) {
+                       mali_is_470 = MALI_TRUE;
+                       MALI_DEBUG_PRINT(2, ("mali-470 device tree detected."));
+               }
+       }
+
+       for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+               res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+               if (res) {
+                       mali_osk_resource_bank[i].irq = res->start;
+               } else {
+                       mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+               }
+       }
+
+       for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+               if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+                       pp_core_num++;
+               }
+       }
+
+       /* We have to divide by 2 because each PP is counted twice (a PP core and its PP MMU core). */
+       if (0 != pp_core_num % 2) {
+               MALI_DEBUG_PRINT(2, ("Unexpected odd PP core resource count."));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pp_core_num /= 2;
+
+       /**
+        * We can calculate the number of L2 cache cores from the number of PP cores
+        * and the device type (Mali-400/450/470).
+        */
+       l2_core_num = 1;
+       if (mali_is_450) {
+               if (pp_core_num > 4) {
+                       l2_core_num = 3;
+               } else {
+                       l2_core_num = 2;
+               }
+       }
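+       /* Illustrative mapping (derived from the rules above): a Mali-450 MP6
+        * (six PP cores) gets l2_core_num == 3, an MP4 gets 2, while
+        * Mali-400/470 keep the single L2 assumed above. */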
+
+       for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+               mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+       }
+
+       /* If the device is neither mali-450 nor mali-470, remove the related resources from the resource bank. */
+       if (!(mali_is_450 || mali_is_470)) {
+               for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+                       mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+               }
+       }
+
+       if (mali_is_470)
+               mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+       int i;
+
+       if (NULL == mali_platform_device) {
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       /* Traverse all resources in the resource bank to find the matching one. */
+       for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+               if (mali_osk_resource_bank[i].base == addr) {
+                       if (NULL != res) {
+                               res->base = addr + _mali_osk_resource_base_address();
+                               res->description = mali_osk_resource_bank[i].description;
+                               res->irq = mali_osk_resource_bank[i].irq;
+                       }
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+       struct resource *reg_res = NULL;
+       uintptr_t ret = 0;
+
+       reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+
+       if (NULL != reg_res) {
+               ret = reg_res->start;
+       }
+
+       return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+       struct property *prop;
+       const __be32 *p;
+       int length = 0, i = 0;
+       u32 u;
+
+       MALI_DEBUG_PRINT(2, ("Get pmu config from device tree configuration.\n"));
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       if (!of_get_property(node, "pmu_domain_config", &length)) {
+               return;
+       }
+
+       if (array_size != length / sizeof(u32)) {
+               MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+               return;
+       }
+
+       of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+               domain_config_array[i] = (u16)u;
+               i++;
+       }
+
+       return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+       u32 switch_delay;
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+               return switch_delay;
+       } else {
+               MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+       int i;
+       uintptr_t phys_addr;
+
+       if (NULL == mali_platform_device) {
+               /* Not connected to a device */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       phys_addr = addr + _mali_osk_resource_base_address();
+       for (i = 0; i < mali_platform_device->num_resources; i++) {
+               if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+                   mali_platform_device->resource[i].start == phys_addr) {
+                       if (NULL != res) {
+                               res->base = phys_addr;
+                               res->description = mali_platform_device->resource[i].name;
+
+                               /* Any (optional) IRQ resource belonging to this resource will follow */
+                               if ((i + 1) < mali_platform_device->num_resources &&
+                                   IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
+                                       res->irq = mali_platform_device->resource[i + 1].start;
+                               } else {
+                                       res->irq = -1;
+                               }
+                       }
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+       uintptr_t lowest_addr = (uintptr_t)(0 - 1);
+       uintptr_t ret = 0;
+
+       if (NULL != mali_platform_device) {
+               int i;
+               for (i = 0; i < mali_platform_device->num_resources; i++) {
+                       if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+                           mali_platform_device->resource[i].start < lowest_addr) {
+                               lowest_addr = mali_platform_device->resource[i].start;
+                               ret = lowest_addr;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+       _mali_osk_device_data data = { 0, };
+
+       MALI_DEBUG_PRINT(2, ("Get pmu config from platform device data.\n"));
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Copy the custom customer power domain config */
+               _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+       }
+
+       return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+       _mali_osk_errcode_t err;
+       _mali_osk_device_data data = { 0, };
+
+       err = _mali_osk_device_data_get(&data);
+
+       if (_MALI_OSK_ERR_OK == err) {
+               return data.pmu_switch_delay;
+       }
+
+       return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
+{
+       MALI_DEBUG_ASSERT_POINTER(data);
+
+       if (NULL != mali_platform_device) {
+               struct mali_gpu_device_data *os_data = NULL;
+
+               os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+               if (NULL != os_data) {
+                       /* Copy data from the OS dependent struct to the OS neutral Mali struct (they are identical!) */
+                       BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
+                       _mali_osk_memcpy(data, os_data, sizeof(*os_data));
+
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+u32 _mali_osk_identify_gpu_resource(void)
+{
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+               /* Mali 450 */
+               return 0x450;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL))
+               /* Mali 470 */
+               return 0x470;
+
+       /* Mali 400 */
+       return 0x400;
+}
+
+mali_bool _mali_osk_shared_interrupts(void)
+{
+       u32 irqs[128];
+       u32 i, j, irq, num_irqs_found = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+       for (i = 0; i < mali_platform_device->num_resources; i++) {
+               if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) {
+                       irq = mali_platform_device->resource[i].start;
+
+                       for (j = 0; j < num_irqs_found; ++j) {
+                               if (irq == irqs[j]) {
+                                       return MALI_TRUE;
+                               }
+                       }
+
+                       irqs[num_irqs_found++] = irq;
+               }
+       }
+
+       return MALI_FALSE;
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void)
+{
+       _mali_osk_device_data data = { 0, };
+
+       if (_MALI_OSK_ERR_OK ==  _mali_osk_device_data_get(&data)) {
+               if ((NULL != data.secure_mode_init) && (NULL != data.secure_mode_deinit)
+                   && (NULL != data.gpu_reset_and_secure_mode_enable) && (NULL != data.gpu_reset_and_secure_mode_disable)) {
+                       int err = data.secure_mode_init();
+                       if (err) {
+                               MALI_DEBUG_PRINT(1, ("Failed to init gpu secure mode.\n"));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       mali_secure_mode_deinit = data.secure_mode_deinit;
+                       mali_gpu_reset_and_secure_mode_enable = data.gpu_reset_and_secure_mode_enable;
+                       mali_gpu_reset_and_secure_mode_disable = data.gpu_reset_and_secure_mode_disable;
+
+                       mali_secure_mode_supported = MALI_TRUE;
+                       mali_secure_mode_enabled = MALI_FALSE;
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+       MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n"));
+       return _MALI_OSK_ERR_UNSUPPORTED;
+
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void)
+{
+       if (NULL !=  mali_secure_mode_deinit) {
+               mali_secure_mode_deinit();
+               mali_secure_mode_enabled = MALI_FALSE;
+               mali_secure_mode_supported = MALI_FALSE;
+               return _MALI_OSK_ERR_OK;
+       }
+       MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n"));
+       return _MALI_OSK_ERR_UNSUPPORTED;
+
+}
+
+
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void)
+{
+       /* The mali executor lock must be held before entering this function. */
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == mali_secure_mode_enabled);
+
+       if (NULL !=  mali_gpu_reset_and_secure_mode_enable) {
+               if (mali_gpu_reset_and_secure_mode_enable()) {
+                       MALI_DEBUG_PRINT(1, ("Failed to reset GPU or enable gpu secure mode.\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+               mali_secure_mode_enabled = MALI_TRUE;
+               return _MALI_OSK_ERR_OK;
+       }
+       MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n"));
+       return _MALI_OSK_ERR_UNSUPPORTED;
+}
+
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void)
+{
+       /* The mali executor lock must be held before entering this function. */
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_secure_mode_enabled);
+
+       if (NULL != mali_gpu_reset_and_secure_mode_disable) {
+               if (mali_gpu_reset_and_secure_mode_disable()) {
+                       MALI_DEBUG_PRINT(1, ("Failed to reset GPU or disable gpu secure mode.\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
+               mali_secure_mode_enabled = MALI_FALSE;
+               return _MALI_OSK_ERR_OK;
+       }
+       MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n"));
+       return _MALI_OSK_ERR_UNSUPPORTED;
+}
+
+mali_bool _mali_osk_gpu_secure_mode_is_enabled(void)
+{
+       return mali_secure_mode_enabled;
+}
+
+mali_bool _mali_osk_gpu_secure_mode_is_supported(void)
+{
+       return mali_secure_mode_supported;
+}
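+
+/*
+ * A minimal usage sketch of the secure mode API above (illustrative only;
+ * the platform must supply all four callbacks in its _mali_osk_device_data
+ * for init to succeed):
+ *
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_gpu_secure_mode_init()) {
+ *             // take the executor lock before the enable/disable calls
+ *             _mali_osk_gpu_reset_and_secure_mode_enable();
+ *             // ... submit protected jobs ...
+ *             _mali_osk_gpu_reset_and_secure_mode_disable();
+ *             _mali_osk_gpu_secure_mode_deinit();
+ *     }
+ */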
+
+
diff --git a/utgard/r8p0/linux/mali_osk_math.c b/utgard/r8p0/linux/mali_osk_math.c
new file mode 100755 (executable)
index 0000000..3f06723
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 _mali_osk_clz(u32 input)
+{
+       return 32 - fls(input);
+}
+
+u32 _mali_osk_fls(u32 input)
+{
+       return fls(input);
+}
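+
+/*
+ * Illustrative values, following the fls() semantics in <linux/bitops.h>:
+ * _mali_osk_fls(0) == 0, _mali_osk_fls(1) == 1, _mali_osk_fls(0x10) == 5;
+ * correspondingly _mali_osk_clz(0) == 32 and _mali_osk_clz(1) == 31.
+ */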
diff --git a/utgard/r8p0/linux/mali_osk_memory.c b/utgard/r8p0/linux/mali_osk_memory.c
new file mode 100755 (executable)
index 0000000..ad5494d
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
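+/*
+ * Thin wrappers over the kernel allocators: _mali_osk_malloc() and
+ * _mali_osk_calloc() return physically contiguous memory (kmalloc/kcalloc),
+ * while _mali_osk_valloc() uses vmalloc, which is only virtually contiguous
+ * and therefore suits larger allocations.
+ */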
+inline void *_mali_osk_calloc(u32 n, u32 size)
+{
+       return kcalloc(n, size, GFP_KERNEL);
+}
+
+inline void *_mali_osk_malloc(u32 size)
+{
+       return kmalloc(size, GFP_KERNEL);
+}
+
+inline void _mali_osk_free(void *ptr)
+{
+       kfree(ptr);
+}
+
+inline void *_mali_osk_valloc(u32 size)
+{
+       return vmalloc(size);
+}
+
+inline void _mali_osk_vfree(void *ptr)
+{
+       vfree(ptr);
+}
+
+inline void *_mali_osk_memcpy(void *dst, const void *src, u32 len)
+{
+       return memcpy(dst, src, len);
+}
+
+inline void *_mali_osk_memset(void *s, u32 c, u32 n)
+{
+       return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated)
+{
+       /* No need to prevent an out-of-memory dialogue appearing on Linux,
+        * so we always return MALI_TRUE.
+        */
+       return MALI_TRUE;
+}
diff --git a/utgard/r8p0/linux/mali_osk_misc.c b/utgard/r8p0/linux/mali_osk_misc.c
new file mode 100755 (executable)
index 0000000..2a6ae91
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+#if !defined(CONFIG_MALI_QUIET)
+void _mali_osk_dbgmsg(const char *fmt, ...)
+{
+       va_list args;
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+}
+#endif /* !defined(CONFIG_MALI_QUIET) */
+
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
+{
+       int res;
+       va_list args;
+       va_start(args, fmt);
+
+       res = vscnprintf(buf, (size_t)size, fmt, args);
+
+       va_end(args);
+       return res;
+}
+
+void _mali_osk_abort(void)
+{
+       /* make a simple fault by dereferencing a NULL pointer */
+       dump_stack();
+       *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+       _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+       /* Thread group ID is the process ID on Linux */
+       return (u32)current->tgid;
+}
+
+char *_mali_osk_get_comm(void)
+{
+       return (char *)current->comm;
+}
+
+u32 _mali_osk_get_tid(void)
+{
+       /* pid is actually identifying the thread on Linux */
+       u32 tid = current->pid;
+
+       /* If the pid is 0 the core was idle.  Instead of returning 0 we return a special number
+        * identifying which core we are on. */
+       if (0 == tid) {
+               tid = -(1 + raw_smp_processor_id());
+       }
+
+       return tid;
+}
diff --git a/utgard/r8p0/linux/mali_osk_notification.c b/utgard/r8p0/linux/mali_osk_notification.c
new file mode 100755 (executable)
index 0000000..5867841
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type.
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl;
+ * when a new notification is posted, a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct {
+       spinlock_t mutex; /**< Mutex protecting the list */
+       wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+       struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct {
+       struct list_head list;           /**< Internal linked list variable */
+       _mali_osk_notification_t data;   /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void)
+{
+       _mali_osk_notification_queue_t         *result;
+
+       result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+       if (NULL == result) return NULL;
+
+       spin_lock_init(&result->mutex);
+       init_waitqueue_head(&result->receive_queue);
+       INIT_LIST_HEAD(&result->head);
+
+       return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
+{
+       /* OPT Recycling of notification objects */
+       _mali_osk_notification_wrapper_t *notification;
+
+       notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+                       GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+       if (NULL == notification) {
+               MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+               return NULL;
+       }
+
+       /* Init the list */
+       INIT_LIST_HEAD(&notification->list);
+
+       if (0 != size) {
+               notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t);
+       } else {
+               notification->data.result_buffer = NULL;
+       }
+
+       /* set up the non-allocating fields */
+       notification->data.notification_type = type;
+       notification->data.result_buffer_size = size;
+
+       /* all ok */
+       return &(notification->data);
+}
+
+void _mali_osk_notification_delete(_mali_osk_notification_t *object)
+{
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER(object);
+
+       notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+       /* Free the container */
+       kfree(notification);
+}
+
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue)
+{
+       _mali_osk_notification_t *result;
+       MALI_DEBUG_ASSERT_POINTER(queue);
+
+       while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
+               _mali_osk_notification_delete(result);
+       }
+
+       /* not much to do, just free the memory */
+       kfree(queue);
+}
+
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_notification_wrapper_t *notification;
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_ASSERT_POINTER(object);
+
+       notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       /* and wake up one possible exclusive waiter */
+       wake_up(&queue->receive_queue);
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       unsigned long irq_flags;
+#endif
+
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+       spin_lock(&queue->mutex);
+#endif
+
+       if (!list_empty(&queue->head)) {
+               wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+               *result = &(wrapper_object->data);
+               list_del_init(&wrapper_object->list);
+               ret = _MALI_OSK_ERR_OK;
+       }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+       spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+       spin_unlock(&queue->mutex);
+#endif
+
+       return ret;
+}
+
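+/*
+ * Blocking receive: sleeps until the queue is non-empty. A return value
+ * of _MALI_OSK_ERR_RESTARTSYSCALL means wait_event_interruptible() was
+ * interrupted by a signal and the caller should let the syscall restart.
+ */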
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+       /* check input */
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_ASSERT_POINTER(result);
+
+       /* default result */
+       *result = NULL;
+
+       if (wait_event_interruptible(queue->receive_queue,
+                                    _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
+               return _MALI_OSK_ERR_RESTARTSYSCALL;
+       }
+
+       return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/utgard/r8p0/linux/mali_osk_pm.c b/utgard/r8p0/linux/mali_osk_pm.c
new file mode 100755 (executable)
index 0000000..6a579ad
--- /dev/null
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#include "mali_kernel_linux.h"
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Can NOT run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       int err;
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       err = pm_runtime_get_sync(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+       if (0 > err) {
+               MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       int err;
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+       err = pm_runtime_get(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+       if (0 > err && -EINPROGRESS != err) {
+               MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif
+       return _MALI_OSK_ERR_OK;
+}
+
+/* Can run in atomic context */
+void _mali_osk_pm_dev_ref_put(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+       pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+       pm_runtime_put(&(mali_platform_device->dev));
+#endif
+#endif
+}
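+
+/*
+ * Note: on kernels >= 2.6.37 the put path above defers power-down via
+ * autosuspend (pm_runtime_mark_last_busy() followed by
+ * pm_runtime_put_autosuspend()) rather than dropping the reference
+ * immediately with pm_runtime_put().
+ */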
+
+void _mali_osk_pm_dev_barrier(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+       pm_runtime_barrier(&(mali_platform_device->dev));
+#endif
+}
diff --git a/utgard/r8p0/linux/mali_osk_profiling.c b/utgard/r8p0/linux/mali_osk_profiling.c
new file mode 100755 (executable)
index 0000000..35d1abc
--- /dev/null
@@ -0,0 +1,1282 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+#include "mali_executor.h"
+#include "mali_memory_manager.h"
+
+#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100
+#define MALI_PROFILING_STREAM_HOLD_TIME 1000000         /* 1 ms */
+
+#define MALI_PROFILING_STREAM_BUFFER_SIZE       (1 << 12)
+#define MALI_PROFILING_STREAM_BUFFER_NUM        100
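+
+/*
+ * With the defaults above the stream pool is 100 buffers of 4 KiB each,
+ * i.e. roughly 400 KiB of profiling data can be queued before the
+ * sampling path must drop samples for lack of a free buffer.
+ */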
+
+/**
+ * Define the mali profiling stream struct.
+ */
+typedef struct mali_profiling_stream {
+       u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE];
+       u32 used_size;
+       struct list_head list;
+} mali_profiling_stream;
+
+typedef struct mali_profiling_stream_list {
+       spinlock_t spin_lock;
+       struct list_head free_list;
+       struct list_head queue_list;
+} mali_profiling_stream_list;
+
+static const char mali_name[] = "4xx";
+static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n";
+
+static u32 profiling_sample_rate = 0;
+static u32 first_sw_counter_index = 0;
+
+static mali_bool l2_cache_counter_if_enabled = MALI_FALSE;
+static u32 num_counters_enabled = 0;
+static u32 mem_counters_enabled = 0;
+
+static _mali_osk_atomic_t stream_fd_if_used;
+
+static wait_queue_head_t stream_fd_wait_queue;
+static mali_profiling_counter *global_mali_profiling_counters = NULL;
+static u32 num_global_mali_profiling_counters = 0;
+
+static mali_profiling_stream_list *global_mali_stream_list = NULL;
+static mali_profiling_stream *mali_counter_stream = NULL;
+static mali_profiling_stream *mali_core_activity_stream = NULL;
+static u64 mali_core_activity_stream_dequeue_time = 0;
+static spinlock_t mali_activity_lock;
+static u32 mali_activity_cores_num = 0;
+static struct hrtimer profiling_sampling_timer;
+
+const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS;
+const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS;
+
+static u32 current_profiling_pid = 0;
+
+static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list)
+{
+       mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+       MALI_DEBUG_ASSERT_POINTER(profiling_stream_list);
+
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) {
+               list_del(&profiling_stream->list);
+               kfree(profiling_stream);
+       }
+
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) {
+               list_del(&profiling_stream->list);
+               kfree(profiling_stream);
+       }
+
+       kfree(profiling_stream_list);
+}
+
+static void _mali_profiling_global_stream_list_free(void)
+{
+       mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+       unsigned long irq_flags;
+
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) {
+               profiling_stream->used_size = 0;
+               list_move(&profiling_stream->list, &global_mali_stream_list->free_list);
+       }
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream)
+{
+       unsigned long irq_flags;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+
+       if (!list_empty(stream_list)) {
+               *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list);
+               list_del_init(&(*new_mali_profiling_stream)->list);
+       } else {
+               ret = _MALI_OSK_ERR_NOMEM;
+       }
+
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+
+       return ret;
+}
+
+static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream)
+{
+       unsigned long irq_flags;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       list_add_tail(&current_mali_profiling_stream->list, stream_list);
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void)
+{
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       return list_empty(&global_mali_stream_list->queue_list);
+}
+
+static u32 _mali_profiling_global_stream_queue_list_next_size(void)
+{
+       unsigned long irq_flags;
+       u32 size = 0;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       if (!list_empty(&global_mali_stream_list->queue_list)) {
+               mali_profiling_stream *next_mali_profiling_stream =
+                       list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list);
+               size = next_mali_profiling_stream->used_size;
+       }
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+       return size;
+}
+
+/* The mali profiling stream file operations functions. */
+static ssize_t _mali_profiling_stream_read(
+       struct file *filp,
+       char __user *buffer,
+       size_t      size,
+       loff_t      *f_pos);
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait);
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations mali_profiling_stream_fops = {
+       .release = _mali_profiling_stream_release,
+       .read    = _mali_profiling_stream_read,
+       .poll    = _mali_profiling_stream_poll,
+};
+
+static ssize_t _mali_profiling_stream_read(
+       struct file *filp,
+       char __user *buffer,
+       size_t      size,
+       loff_t      *f_pos)
+{
+       u32 copy_len = 0;
+       mali_profiling_stream *current_mali_profiling_stream;
+       u32 used_size;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       while (!_mali_profiling_global_stream_queue_list_if_empty()) {
+               used_size = _mali_profiling_global_stream_queue_list_next_size();
+               if (used_size <= ((u32)size - copy_len)) {
+                       current_mali_profiling_stream = NULL;
+                       _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list,
+                                       &current_mali_profiling_stream);
+                       MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream);
+                       if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) {
+                               current_mali_profiling_stream->used_size = 0;
+                               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+                               return -EFAULT;
+                       }
+                       copy_len += current_mali_profiling_stream->used_size;
+                       current_mali_profiling_stream->used_size = 0;
+                       _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+               } else {
+                       break;
+               }
+       }
+       return (ssize_t)copy_len;
+}
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait)
+{
+       poll_wait(filp, &stream_fd_wait_queue, wait);
+       if (!_mali_profiling_global_stream_queue_list_if_empty())
+               return POLLIN;
+       return 0;
+}
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp)
+{
+       _mali_osk_atomic_init(&stream_fd_if_used, 0);
+       return 0;
+}
+
+/* The functions for control packet and stream data. */
+static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size)
+{
+       u32 i;
+
+       for (i = 0; i < sizeof(size); ++i)
+               buf[i] = (size >> 8 * i) & 0xFF;
+}
+
+static u32 _mali_profiling_get_packet_size(unsigned char *const buf)
+{
+       u32 i;
+       u32 size = 0;
+       for (i = 0; i < sizeof(size); ++i)
+               size |= (u32)buf[i] << 8 * i;
+       return size;
+}
+
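+/*
+ * Decode one variable-length signed integer from a control packet
+ * (LEB128-style: seven payload bits per byte, bit 0x80 = continuation,
+ * bit 0x40 of the final byte = sign). For example, 0x7f decodes to -1
+ * and 0xc0 0x00 decodes to 64.
+ */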
+static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size)
+{
+       u64 int_value = 0;
+       u8 shift = 0;
+       u8 byte_value = ~0;
+
+       while ((byte_value & 0x80) != 0) {
+               if ((*pos) >= packet_size)
+                       return -1;
+               byte_value = buf[*pos];
+               *pos += 1;
+               int_value |= (u32)(byte_value & 0x7f) << shift;
+               shift += 7;
+       }
+
+       if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) {
+               int_value |= -(1 << shift);
+       }
+
+       return int_value;
+}
+
+static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value)
+{
+       u32 add_bytes = 0;
+       int more = 1;
+       while (more) {
+               /* low order 7 bits of val */
+               char byte_value = value & 0x7f;
+               value >>= 7;
+
+               if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) {
+                       more = 0;
+               } else {
+                       byte_value |= 0x80;
+               }
+
+               if ((pos + add_bytes) >= buf_size)
+                       return 0;
+               buf[pos + add_bytes] = byte_value;
+               add_bytes++;
+       }
+
+       return add_bytes;
+}
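+
+/*
+ * Encoding examples for the signed LEB128-style packing used by
+ * _mali_profiling_pack_int()/_mali_profiling_pack_long(): 0 packs to the
+ * single byte 0x00, -1 packs to 0x7f, and 64 packs to 0xc0 0x00 (a second
+ * byte is needed because bit 0x40 of the first would otherwise read as a
+ * sign bit).
+ */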
+
+static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val)
+{
+       int add_bytes = 0;
+       int more = 1;
+       while (more) {
+               /* low order 7 bits of x */
+               char byte_value = val & 0x7f;
+               val >>= 7;
+
+               if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) {
+                       more = 0;
+               } else {
+                       byte_value |= 0x80;
+               }
+
+               MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+               buf[pos + add_bytes] = byte_value;
+               add_bytes++;
+       }
+
+       return add_bytes;
+}
+
+static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value)
+{
+       u32 add_size = STREAM_HEADER_SIZE;
+       MALI_DEBUG_ASSERT_POINTER(profiling_stream);
+       MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE);
+
+       profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE;
+
+       add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                             profiling_stream->used_size + add_size, current_time);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)0);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)key);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)counter_value);
+
+       _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1,
+                                       add_size - STREAM_HEADER_SIZE);
+
+       profiling_stream->used_size += add_size;
+}
+
+/* The callback function for sampling timer.*/
+static enum hrtimer_restart  _mali_profiling_sampling_counters(struct hrtimer *timer)
+{
+       u32 counter_index;
+       s64 current_time;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters);
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       MALI_DEBUG_ASSERT(NULL == mali_counter_stream);
+       if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+                   &global_mali_stream_list->free_list, &mali_counter_stream)) {
+
+               MALI_DEBUG_ASSERT_POINTER(mali_counter_stream);
+               MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size);
+
+               /* Capture l2 cache counter values if enabled */
+               if (MALI_TRUE == l2_cache_counter_if_enabled) {
+                       int i, j = 0;
+                       _mali_profiling_l2_counter_values l2_counters_values;
+                       _mali_profiling_get_l2_counters(&l2_counters_values);
+
+                       for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) {
+                               if (0 == (j % 2))
+                                       _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0);
+                               else
+                                       _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1);
+                               j++;
+                       }
+               }
+
+               current_time = (s64)_mali_osk_boot_time_get_ns();
+
+               /* Add all enabled counter values into stream */
+               for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) {
+                       /* No need to sample these counters here. */
+                       if (global_mali_profiling_counters[counter_index].enabled) {
+                               if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER &&
+                                    global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) {
+
+                                       continue;
+                               }
+
+                               if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 &&
+                                   global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) {
+
+                                       u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value;
+
+                                       _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+                                                                          global_mali_profiling_counters[counter_index].current_counter_value - prev_val);
+
+                                       prev_val = global_mali_profiling_counters[counter_index].current_counter_value;
+
+                                       global_mali_profiling_counters[counter_index].prev_counter_value = prev_val;
+                               } else {
+
+                                       if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) {
+                                               u32 total_alloc_mem = _mali_ukk_report_memory_usage();
+                                               global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE;
+                                       }
+                                       _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+                                                                          global_mali_profiling_counters[counter_index].current_counter_value);
+                                       if (global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER)
+                                               global_mali_profiling_counters[counter_index].current_counter_value = 0;
+                               }
+                       }
+               }
+               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream);
+               mali_counter_stream = NULL;
+       } else {
+               MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+       }
+
+       wake_up_interruptible(&stream_fd_wait_queue);
+
+       /* Enable the sampling timer again. */
+       if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+               hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate));
+               return HRTIMER_RESTART;
+       }
+       return HRTIMER_NORESTART;
+}
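+
+/*
+ * Timer protocol note: returning HRTIMER_RESTART after hrtimer_forward_now()
+ * re-arms the timer one sampling period ahead, so sampling continues until
+ * the counters are disabled or profiling_sample_rate is set back to zero,
+ * at which point the callback returns HRTIMER_NORESTART.
+ */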
+
+static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&mali_activity_lock, irq_flags);
+       if (activity == 0)
+               mali_activity_cores_num--;
+       else
+               mali_activity_cores_num++;
+       spin_unlock_irqrestore(&mali_activity_lock, irq_flags);
+
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < num_global_mali_profiling_counters; i++) {
+                       if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+                               u64 current_time = _mali_osk_boot_time_get_ns();
+                               u32 add_size = STREAM_HEADER_SIZE;
+
+                               if (NULL != mali_core_activity_stream) {
+                                       if ((mali_core_activity_stream_dequeue_time +  MALI_PROFILING_STREAM_HOLD_TIME < current_time) ||
+                                           (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE
+                                            - mali_core_activity_stream->used_size)) {
+                                               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+                                               mali_core_activity_stream = NULL;
+                                               wake_up_interruptible(&stream_fd_wait_queue);
+                                       }
+                               }
+
+                               if (NULL == mali_core_activity_stream) {
+                                       if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+                                                   &global_mali_stream_list->free_list, &mali_core_activity_stream)) {
+                                               mali_core_activity_stream_dequeue_time = current_time;
+                                       } else {
+                                               MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+                                               wake_up_interruptible(&stream_fd_wait_queue);
+                                               break;
+                                       }
+                               }
+
+                               mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY;
+
+                               add_size += _mali_profiling_pack_long(mali_core_activity_stream->data,
+                                                                     MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid);
+
+                               _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1,
+                                                               add_size - STREAM_HEADER_SIZE);
+
+                               mali_core_activity_stream->used_size += add_size;
+
+                               if (0 == mali_activity_cores_num) {
+                                       _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+                                       mali_core_activity_stream = NULL;
+                                       wake_up_interruptible(&stream_fd_wait_queue);
+                               }
+
+                               break;
+                       }
+               }
+       }
+}
+
+static mali_bool _mali_profiling_global_counters_init(void)
+{
+       int core_id, counter_index, counter_number, counter_id;
+       u32 num_l2_cache_cores;
+       u32 num_pp_cores;
+       u32 num_gp_cores = 1;
+
+       MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters);
+       num_pp_cores = mali_pp_get_glob_num_pp_cores();
+       num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+       num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores
+                                            + MALI_PROFILING_SW_COUNTERS_NUM
+                                            + MALI_PROFILING_SPECIAL_COUNTERS_NUM
+                                            + MALI_PROFILING_MEM_COUNTERS_NUM;
+       global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter));
+
+       if (NULL == global_mali_profiling_counters)
+               return MALI_FALSE;
+
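+       /*
+        * Example (hypothetical Mali-450 configuration with 1 GP, 6 PP and
+        * 3 L2 caches): 3 * (1 + 6) + 2 * 3 = 27 hardware counter entries,
+        * plus the fixed SW, special and mem counter counts.
+        */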
+       counter_index = 0;
+       /* Vertex processor counters */
+       for (core_id = 0; core_id < num_gp_cores; core_id++) {
+               global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* Fragment processors' counters */
+       for (core_id = 0; core_id < num_pp_cores; core_id++) {
+               counter_index++;
+               global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* L2 Cache counters */
+       for (core_id = 0; core_id < num_l2_cache_cores; core_id++) {
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* Now set up the software counter entries */
+       for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) {
+               counter_index++;
+
+               if (0 == first_sw_counter_index)
+                       first_sw_counter_index = counter_index;
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER);
+       }
+
+       /* Now set up the special counter entries */
+       for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) {
+
+               counter_index++;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+                                  mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]);
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+       }
+
+       /* Now set up the mem counter entries */
+       for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) {
+
+               counter_index++;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+                                  mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]);
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+       }
+
+       MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters);
+
+       return MALI_TRUE;
+}
+
+void _mali_profiling_notification_mem_counter(struct mali_session_data *session, u32 counter_id, u32 key, int enable)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL != session) {
+               _mali_osk_notification_t *notification;
+               _mali_osk_notification_queue_t *queue;
+
+               queue = session->ioctl_queue;
+               MALI_DEBUG_ASSERT(NULL != queue);
+
+               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER,
+                               sizeof(_mali_uk_annotate_profiling_mem_counter_s));
+
+               if (NULL != notification) {
+                       _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer;
+                       data->counter_id = counter_id;
+                       data->key = key;
+                       data->enable = enable;
+
+                       _mali_osk_notification_queue_send(queue, notification);
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+       }
+}
+
+void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL != session) {
+               _mali_osk_notification_t *notification;
+               _mali_osk_notification_queue_t *queue;
+
+               queue = session->ioctl_queue;
+               MALI_DEBUG_ASSERT(NULL != queue);
+
+               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE,
+                               sizeof(_mali_uk_annotate_profiling_enable_s));
+
+               if (NULL != notification) {
+                       _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer;
+                       data->sampling_rate = sampling_rate;
+                       data->enable = enable;
+
+                       _mali_osk_notification_queue_send(queue, notification);
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+       }
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+       int i;
+       mali_profiling_stream *new_mali_profiling_stream = NULL;
+       mali_profiling_stream_list *new_mali_profiling_stream_list = NULL;
+       if (MALI_TRUE == auto_start) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+       }
+
+       /* Init the global_mali_stream_list. */
+       MALI_DEBUG_ASSERT(NULL == global_mali_stream_list);
+       new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL);
+
+       if (NULL == new_mali_profiling_stream_list) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       spin_lock_init(&new_mali_profiling_stream_list->spin_lock);
+       INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list);
+       INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list);
+
+       spin_lock_init(&mali_activity_lock);
+       mali_activity_cores_num = 0;
+
+       for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) {
+               new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL);
+               if (NULL == new_mali_profiling_stream) {
+                       _mali_profiling_stream_list_destory(new_mali_profiling_stream_list);
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+
+               INIT_LIST_HEAD(&new_mali_profiling_stream->list);
+               new_mali_profiling_stream->used_size = 0;
+               list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list);
+       }
+
+       _mali_osk_atomic_init(&stream_fd_if_used, 0);
+       init_waitqueue_head(&stream_fd_wait_queue);
+
+       hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+       profiling_sampling_timer.function = _mali_profiling_sampling_counters;
+
+       global_mali_stream_list = new_mali_profiling_stream_list;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+       if (0 != profiling_sample_rate) {
+               hrtimer_cancel(&profiling_sampling_timer);
+               profiling_sample_rate = 0;
+       }
+       _mali_osk_atomic_term(&stream_fd_if_used);
+
+       if (NULL != global_mali_profiling_counters) {
+               _mali_osk_free(global_mali_profiling_counters);
+               global_mali_profiling_counters = NULL;
+               num_global_mali_profiling_counters = 0;
+       }
+
+       if (NULL != global_mali_stream_list) {
+               _mali_profiling_stream_list_destory(global_mali_stream_list);
+               global_mali_stream_list = NULL;
+       }
+}
+
+void _mali_osk_profiling_stop_sampling(u32 pid)
+{
+       if (pid == current_profiling_pid) {
+
+               int i;
+               /* Reset all counter states when closing connection.*/
+               for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                       _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+                       global_mali_profiling_counters[i].enabled = 0;
+                       global_mali_profiling_counters[i].prev_counter_value = 0;
+                       global_mali_profiling_counters[i].current_counter_value = 0;
+               }
+               l2_cache_counter_if_enabled = MALI_FALSE;
+               num_counters_enabled = 0;
+               mem_counters_enabled = 0;
+               _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+               _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+               /* Delete sampling timer when closing connection. */
+               if (0 != profiling_sample_rate) {
+                       hrtimer_cancel(&profiling_sampling_timer);
+                       profiling_sample_rate = 0;
+               }
+               current_profiling_pid = 0;
+       }
+}
+
+void    _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+       /* Record the frequency & voltage in global_mali_profiling_counters here. */
+       if (0 != profiling_sample_rate) {
+               u32 channel;
+               u32 state;
+               channel = (event_id >> 16) & 0xFF;
+               state = ((event_id >> 24) & 0xF) << 24;
+
+               switch (state) {
+               case MALI_PROFILING_EVENT_TYPE_SINGLE:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) {
+                               u32 reason = (event_id & 0xFFFF);
+                               if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) {
+                                       _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0);
+                                       _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1);
+                               }
+                       }
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_START:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1);
+                       } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+                                  (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+                               u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1);
+                       }
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_STOP:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0);
+                       } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+                                  (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+                               u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0);
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4);
+}
+
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+       trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value)
+{
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < num_global_mali_profiling_counters; i++) {
+                       if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+                               global_mali_profiling_counters[i].current_counter_value = value;
+                               break;
+                       }
+               }
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+       /* Always add process and thread identificator in the first two data elements for events from user space */
+       _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+       u32 *counters = (u32 *)(uintptr_t)args->counters;
+
+       _mali_osk_profiling_report_sw_counters(counters);
+
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i++) {
+                       if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) {
+                               global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i);
+                       }
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
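+/*
+ * The stream fd is handed out as an anonymous-inode file. The atomic
+ * stream_fd_if_used acts as a single-owner guard: only the caller that
+ * bumps it from 0 to 1 gets an fd; later callers get _MALI_OSK_ERR_BUSY
+ * until _mali_profiling_stream_release() resets the counter.
+ */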
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) {
+
+               s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops,
+                                         session,
+                                         O_RDONLY | O_CLOEXEC);
+
+               args->stream_fd = fd;
+               if (0 > fd) {
+                       _mali_osk_atomic_dec(&stream_fd_if_used);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       } else {
+               _mali_osk_atomic_dec(&stream_fd_if_used);
+               args->stream_fd = -1;
+               return _MALI_OSK_ERR_BUSY;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args)
+{
+       u32 control_packet_size;
+       u32 output_buffer_size;
+
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) {
+               MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       control_packet_size = args->control_packet_size;
+       output_buffer_size = args->response_packet_size;
+
+       if (0 != control_packet_size) {
+               u8 control_type;
+               u8 *control_packet_data;
+               u8 *response_packet_data;
+               u32 version_length = sizeof(utgard_setup_version) - 1;
+
+               control_packet_data = (u8 *)(uintptr_t)args->control_packet_data;
+               MALI_DEBUG_ASSERT_POINTER(control_packet_data);
+               response_packet_data = (u8 *)(uintptr_t)args->response_packet_data;
+               MALI_DEBUG_ASSERT_POINTER(response_packet_data);
+
+               /* Decide whether the Utgard setup version prefix needs to be stripped. */
+               if (control_packet_size >= version_length) {
+                       if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) {
+                               if (control_packet_size == version_length) {
+                                       args->response_packet_size = 0;
+                                       return _MALI_OSK_ERR_OK;
+                               } else {
+                                       control_packet_data += version_length;
+                                       control_packet_size -= version_length;
+                               }
+                       }
+               }
+
+               current_profiling_pid = _mali_osk_get_pid();
+
+               control_type = control_packet_data[0];
+               switch (control_type) {
+               case PACKET_HEADER_COUNTERS_REQUEST: {
+                       int i;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Send supported counters */
+                       if (PACKET_HEADER_SIZE > output_buffer_size)
+                               return _MALI_OSK_ERR_FAULT;
+
+                       *response_packet_data = PACKET_HEADER_COUNTERS_ACK;
+                       args->response_packet_size = PACKET_HEADER_SIZE;
+
+                       for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                               u32 name_size = strlen(global_mali_profiling_counters[i].counter_name);
+
+                               if ((args->response_packet_size + name_size + 1) > output_buffer_size) {
+                                       MALI_PRINT_ERROR(("Response packet data is too large..\n"));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               memcpy(response_packet_data + args->response_packet_size,
+                                      global_mali_profiling_counters[i].counter_name, name_size + 1);
+
+                               args->response_packet_size += (name_size + 1);
+
+                               if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32)1);
+                               } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores());
+                               } else {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32) - 1);
+                               }
+                       }
+
+                       _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size);
+                       break;
+               }
+
+               case PACKET_HEADER_COUNTERS_ENABLE: {
+                       int i;
+                       u32 request_pos = PACKET_HEADER_SIZE;
+                       mali_bool sw_counter_if_enabled = MALI_FALSE;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Initialize all counter states before enabling the requested counters. */
+                       for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                               _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+                               global_mali_profiling_counters[i].enabled = 0;
+                               global_mali_profiling_counters[i].prev_counter_value = 0;
+                               global_mali_profiling_counters[i].current_counter_value = 0;
+
+                               if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+                                   global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+                                       _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0);
+                               }
+                       }
+
+                       l2_cache_counter_if_enabled = MALI_FALSE;
+                       num_counters_enabled = 0;
+                       mem_counters_enabled = 0;
+                       _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+                       _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+                       _mali_profiling_notification_enable(session, 0, 0);
+
+                       /* Enable requested counters */
+                       while (request_pos < control_packet_size) {
+                               u32 begin = request_pos;
+                               u32 event;
+                               u32 key;
+
+                               /* Scan past the counter name, which must be null-terminated */
+                               while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') {
+                                       ++request_pos;
+                               }
+
+                               if (request_pos >= control_packet_size)
+                                       return _MALI_OSK_ERR_FAULT;
+
+                               ++request_pos;
+                               event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+                               key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+                               for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                                       u32 name_size = strlen((char *)(control_packet_data + begin));
+
+                                       if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) {
+                                               if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id
+                                                                              && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) {
+                                                       sw_counter_if_enabled = MALI_TRUE;
+                                                       _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+                                               }
+
+                                               if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) {
+                                                       _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+                                                       _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff);
+                                                       _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff);
+                                               }
+
+                                               if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+                                                   global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+                                                       _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id,
+                                                                       key, 1);
+                                                       mem_counters_enabled++;
+                                               }
+
+                                               global_mali_profiling_counters[i].counter_event = event;
+                                               global_mali_profiling_counters[i].key = key;
+                                               global_mali_profiling_counters[i].enabled = 1;
+
+                                               _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id,
+                                                                         global_mali_profiling_counters[i].counter_event);
+                                               num_counters_enabled++;
+                                               break;
+                                       }
+                               }
+
+                               if (i == num_global_mali_profiling_counters) {
+                                       MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+                       }
+
+                       if (PACKET_HEADER_SIZE <= output_buffer_size) {
+                               *response_packet_data = PACKET_HEADER_ACK;
+                               _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+                               args->response_packet_size = PACKET_HEADER_SIZE;
+                       } else {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       break;
+               }
+
+               case PACKET_HEADER_START_CAPTURE_VALUE: {
+                       u32 live_rate;
+                       u32 request_pos = PACKET_HEADER_SIZE;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Read the sampling rate in nanoseconds and the live rate, then start the capture. */
+                       profiling_sample_rate =  _mali_profiling_read_packet_int(control_packet_data,
+                                                &request_pos, control_packet_size);
+
+                       live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+                       if (PACKET_HEADER_SIZE <= output_buffer_size) {
+                               *response_packet_data = PACKET_HEADER_ACK;
+                               _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+                               args->response_packet_size = PACKET_HEADER_SIZE;
+                       } else {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+                               _mali_profiling_global_stream_list_free();
+                               if (mem_counters_enabled > 0) {
+                                       _mali_profiling_notification_enable(session, profiling_sample_rate, 1);
+                               }
+                               hrtimer_start(&profiling_sampling_timer,
+                                             ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000),
+                                             HRTIMER_MODE_REL_PINNED);
+                       }
+
+                       break;
+               }
+               default:
+                       MALI_PRINT_ERROR(("Unsupported  profiling packet header type %u.\n", control_type));
+                       args->response_packet_size  = 0;
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       } else {
+               _mali_osk_profiling_stop_sampling(current_profiling_pid);
+               _mali_profiling_notification_enable(session, 0, 0);
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
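+
+/*
+ * Packet format, as implied by the parsing above (a sketch, not a normative
+ * spec): a one-byte header type, then the packed packet size, then the
+ * payload. A leading utgard_setup_version string, if present, is stripped
+ * before the type dispatch.
+ */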
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+       if (COUNTER_VP_0_C0 == counter_id) {
+               mali_gp_job_set_gp_counter_src0(event_id);
+       } else if (COUNTER_VP_0_C1 == counter_id) {
+               mali_gp_job_set_gp_counter_src1(event_id);
+       } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) {
+               /*
+                * Two compatibility notes for this function:
+                *
+                * 1) Previously the DDK allowed per core counters.
+                *
+                *    This did not make much sense on Mali-450 with the "virtual PP core" concept,
+                *    so this option was removed, and only the same pair of HW counters was allowed on all cores,
+                *    beginning with the r3p2 release.
+                *
+                *    Starting with r4p0, it is now possible to set different HW counters for the different sub jobs.
+                *    This should be almost the same, since sub job 0 is designed to run on core 0,
+                *    sub job 1 on core 1, and so on.
+                *
+                *    The scheduling of PP sub jobs is not predictable, and this often led to situations where core 0 ran 2
+                *    sub jobs, while for instance core 1 ran zero. Having the counters set per sub job would thus increase
+                *    the predictability of the returned data (as you would be guaranteed data for all the selected HW counters).
+                *
+                *    PS: Core scaling needs to be disabled in order to use this reliably (goes for both solutions).
+                *
+                *    The framework/#defines used with Gator still indicate that the counter is for a particular core,
+                *    but this is internally used as a sub job ID instead (no translation needed).
+                *
+                *  2) Global/default vs per sub job counters
+                *
+                *     Releases before r3p2 had only per PP core counters.
+                *     r3p2 releases had only one set of default/global counters, which applied to all PP cores.
+                *     Starting with r4p0, we have both a set of default/global counters,
+                *     and individual counters per sub job (equal to per core).
+                *
+                *     To keep compatibility with Gator/DS-5/Streamline, the following scheme is used:
+                *
+                *     r3p2 releases: only the counters set for core 0 are handled;
+                *     they are applied as the default/global set of counters, and will thus affect all cores.
+                *
+                *     r4p0 releases: counters set for core 0 are applied as both the global/default set of counters
+                *     and the counters for sub job 0.
+                *     Counters set for cores 1-7 are applied only to the corresponding sub job.
+                *
+                *     This should allow the DS-5/Streamline GUI to have a simple mode where it only allows setting the
+                *     values for core 0, and thus this will be applied to all PP sub jobs/cores.
+                *     Advanced mode will also be supported, where individual pairs of HW counters can be selected.
+                *
+                *     The GUI will (until it is updated) still refer to cores instead of sub jobs, but this is probably
+                *     something we can live with!
+                *
+                *     Mali-450 note: Each job is not divided into a deterministic number of sub jobs, as the HW DLBU
+                *     automatically distributes the load between whatever number of cores is available at this particular time.
+                *     A normal PP job on Mali-450 is thus considered a single (virtual) job, and it will thus only be possible
+                *     to use a single pair of HW counters (even if the job ran on multiple PP cores).
+                *     In other words, only the global/default pair of PP HW counters will be used for normal Mali-450 jobs.
+                */
+               u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1;
+               u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1;
+               if (0 == counter_src) {
+                       mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id);
+                       if (0 == sub_job) {
+                               mali_pp_job_set_pp_counter_global_src0(event_id);
+                       }
+               } else {
+                       mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id);
+                       if (0 == sub_job) {
+                               mali_pp_job_set_pp_counter_global_src1(event_id);
+                       }
+               }
+       } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
+               u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
+               struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+               if (NULL != l2_cache_core) {
+                       u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
+                       mali_l2_cache_core_set_counter_src(l2_cache_core,
+                                                          counter_src, event_id);
+                       l2_cache_counter_if_enabled = MALI_TRUE;
+               }
+       } else {
+               return 0; /* Failure, unknown event */
+       }
+
+       return 1; /* success */
+}
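+
+/*
+ * Illustrative decode (assuming the gator counter IDs are contiguous, as the
+ * arithmetic above implies): counter_id = COUNTER_FP_0_C0 + 2 * sub_job + src,
+ * so COUNTER_FP_0_C0 maps to sub job 0 / src0 (also applied as the global
+ * default) and COUNTER_FP_7_C1 maps to sub job 7 / src1.
+ */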
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ *               the counter sources and values will be output
+ * @return 0 if all went well; otherwise, the mask with bits set for the powered-off cores
+ */
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
+{
+       u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
+       u32 i;
+
+       MALI_DEBUG_ASSERT(l2_cores_num <= 3);
+
+       for (i = 0; i < l2_cores_num; i++) {
+               struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+
+               if (NULL == l2_cache) {
+                       continue;
+               }
+
+               mali_l2_cache_core_get_counter_values(l2_cache,
+                                                     &values->cores[i].source0,
+                                                     &values->cores[i].value0,
+                                                     &values->cores[i].source1,
+                                                     &values->cores[i].value1);
+       }
+
+       return 0;
+}
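+
+/*
+ * Polling sketch (illustrative only, an assumed gator-side caller):
+ *
+ *   _mali_profiling_l2_counter_values values;
+ *   if (0 == _mali_profiling_get_l2_counters(&values)) {
+ *           u32 src0 = values.cores[0].source0;
+ *           u32 val0 = values.cores[0].value0;
+ *   }
+ */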
+
+/**
+ * Called by gator to control the production of profiling information at runtime.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+       switch (action) {
+       case FBDUMP_CONTROL_ENABLE:
+               mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+               break;
+       case FBDUMP_CONTROL_RATE:
+               mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+               break;
+       case SW_COUNTER_ENABLE:
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+               break;
+       case FBDUMP_CONTROL_RESIZE_FACTOR:
+               mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+               break;
+       default:
+               break;  /* Ignore unimplemented actions */
+       }
+}
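+
+/*
+ * Example (illustrative only): gator enabling framebuffer capture of every
+ * second frame might issue:
+ *
+ *   _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+ *   _mali_profiling_control(FBDUMP_CONTROL_RATE, 2);
+ */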
+
+/**
+ * Called by gator to get mali api version.
+ */
+u32 _mali_profiling_get_api_version(void)
+{
+       return MALI_PROFILING_API_VERSION;
+}
+
+/**
+ * Called by gator to get data about the Mali instance in use:
+ * product ID, version, and number of cores.
+ */
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+       values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+       values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+       values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+       values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+       values->num_of_fp_cores = mali_executor_get_num_cores_total();
+       values->num_of_vp_cores = 1;
+}
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/utgard/r8p0/linux/mali_osk_specific.h b/utgard/r8p0/linux/mali_osk_specific.h
new file mode 100755 (executable)
index 0000000..0826b08
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool *mali_dma_pool;
+
+typedef u32 mali_dma_addr;
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current CPU.
+   It is useful for cheap (2 cycle) measurement of the time spent in a
+   code path: sample before and after, then diff the two cycle counts.
+   The counter does not advance while the CPU is idle, so readings are
+   accurate when only spin-locks are used, but mutexes may lead to values
+   that are too low, since the CPU might "idle" while waiting for the
+   mutex to become available.
+   The clock source is configured on the CPU during Mali module load,
+   but will not give useful output after a CPU has been power cycled.
+   It is therefore important to configure the system not to turn off
+   the CPU cores when using this functionality. */
+static inline unsigned int mali_get_cpu_cyclecount(void)
+{
+       unsigned int value;
+       /* Reading the CCNT Register - CPU clock counter */
+       asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+       return value;
+}
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
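+
+/* Minimal usage sketch (illustrative only, with a hypothetical code path):
+ * sample the counter around the code of interest and diff the two readings.
+ *
+ *   unsigned int start = mali_get_cpu_cyclecount();
+ *   interesting_code_path();
+ *   unsigned int spent = mali_get_cpu_cyclecount() - start;
+ */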
+
+
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+       return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
+{
+       return in_atomic();
+}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/utgard/r8p0/linux/mali_osk_time.c b/utgard/r8p0/linux/mali_osk_time.c
new file mode 100755 (executable)
index 0000000..76876b6
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
+{
+       return time_after_eq(ticka, tickb) ?
+              MALI_TRUE : MALI_FALSE;
+}
+
+unsigned long _mali_osk_time_mstoticks(u32 ms)
+{
+       return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
+{
+       return jiffies_to_msecs(ticks);
+}
+
+unsigned long _mali_osk_time_tickcount(void)
+{
+       return jiffies;
+}
+
+void _mali_osk_time_ubusydelay(u32 usecs)
+{
+       udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns(void)
+{
+       struct timespec tsval;
+       getnstimeofday(&tsval);
+       return (u64)timespec_to_ns(&tsval);
+}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+       struct timespec tsval;
+       get_monotonic_boottime(&tsval);
+       return (u64)timespec_to_ns(&tsval);
+}
diff --git a/utgard/r8p0/linux/mali_osk_timers.c b/utgard/r8p0/linux/mali_osk_timers.c
new file mode 100755 (executable)
index 0000000..8ada2da
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct {
+       struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+       _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+       if (NULL != t) init_timer(&t->timer);
+       return t;
+}
+
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       tim->timer.expires = jiffies + ticks_to_expire;
+       add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+void _mali_osk_timer_del(_mali_osk_timer_t *tim)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       del_timer(&(tim->timer));
+}
+
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       return 1 == timer_pending(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       tim->timer.data = (unsigned long)data;
+       tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term(_mali_osk_timer_t *tim)
+{
+       MALI_DEBUG_ASSERT_POINTER(tim);
+       kfree(tim);
+}
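+
+/*
+ * Typical lifecycle (an illustrative sketch with a hypothetical callback and
+ * data pointer, not driver code):
+ *
+ *   _mali_osk_timer_t *t = _mali_osk_timer_init();
+ *   if (NULL != t) {
+ *           _mali_osk_timer_setcallback(t, my_callback, my_data);
+ *           _mali_osk_timer_add(t, _mali_osk_time_mstoticks(100));
+ *           (callback fires after roughly 100 ms)
+ *           _mali_osk_timer_del(t);
+ *           _mali_osk_timer_term(t);
+ *   }
+ */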
diff --git a/utgard/r8p0/linux/mali_osk_wait_queue.c b/utgard/r8p0/linux/mali_osk_wait_queue.c
new file mode 100755 (executable)
index 0000000..caa3abe
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct {
+       wait_queue_head_t wait_queue;
+};
+
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
+{
+       _mali_osk_wait_queue_t *ret = NULL;
+
+       ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+       if (NULL == ret) {
+               return ret;
+       }
+
+       init_waitqueue_head(&ret->wait_queue);
+       MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+       return ret;
+}
+
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
+{
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+       wait_event(queue->wait_queue, condition(data));
+}
+
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
+{
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+       wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
+{
+       MALI_DEBUG_ASSERT_POINTER(queue);
+
+       /* if queue is empty, don't attempt to wake up its elements */
+       if (!waitqueue_active(&queue->wait_queue)) return;
+
+       MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+       wake_up_all(&queue->wait_queue);
+
+       MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
+{
+       /* Parameter validation */
+       MALI_DEBUG_ASSERT_POINTER(queue);
+
+       /* Linux requires no explicit termination of wait queues */
+       kfree(queue);
+}
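+
+/*
+ * Illustrative usage (assumed caller code with a hypothetical predicate and
+ * job structure):
+ *
+ *   static mali_bool job_done(void *data)
+ *   {
+ *           struct my_job *job = data;
+ *           return job->finished ? MALI_TRUE : MALI_FALSE;
+ *   }
+ *
+ *   Waiter: _mali_osk_wait_queue_wait_event(queue, job_done, job);
+ *   Waker:  job->finished = MALI_TRUE;
+ *           _mali_osk_wait_queue_wake_up(queue);
+ */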
diff --git a/utgard/r8p0/linux/mali_osk_wq.c b/utgard/r8p0/linux/mali_osk_wq.c
new file mode 100755 (executable)
index 0000000..06afa04
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+typedef struct _mali_osk_wq_work_s {
+       _mali_osk_wq_work_handler_t handler;
+       void *data;
+       mali_bool high_pri;
+       struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+typedef struct _mali_osk_wq_delayed_work_s {
+       _mali_osk_wq_work_handler_t handler;
+       void *data;
+       struct delayed_work work;
+} mali_osk_wq_delayed_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *mali_wq_normal = NULL;
+static struct workqueue_struct *mali_wq_high = NULL;
+#endif
+
+static void _mali_osk_wq_work_func(struct work_struct *work);
+
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+       MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
+       MALI_DEBUG_ASSERT(NULL == mali_wq_high);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+       mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
+       mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0);
+#else
+       mali_wq_normal = create_workqueue("mali");
+       mali_wq_high = create_workqueue("mali_high_pri");
+#endif
+       if (NULL == mali_wq_normal || NULL == mali_wq_high) {
+               MALI_PRINT_ERROR(("Unable to create Mali workqueues\n"));
+
+               if (mali_wq_normal) destroy_workqueue(mali_wq_normal);
+               if (mali_wq_high)   destroy_workqueue(mali_wq_high);
+
+               mali_wq_normal = NULL;
+               mali_wq_high   = NULL;
+
+               return _MALI_OSK_ERR_FAULT;
+       }
+#endif /* MALI_LICENSE_IS_GPL */
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+       flush_workqueue(mali_wq_high);
+       flush_workqueue(mali_wq_normal);
+#else
+       flush_scheduled_work();
+#endif
+}
+
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+       MALI_DEBUG_ASSERT(NULL != mali_wq_normal);
+       MALI_DEBUG_ASSERT(NULL != mali_wq_high);
+
+       flush_workqueue(mali_wq_normal);
+       destroy_workqueue(mali_wq_normal);
+
+       flush_workqueue(mali_wq_high);
+       destroy_workqueue(mali_wq_high);
+
+       mali_wq_normal = NULL;
+       mali_wq_high   = NULL;
+#else
+       flush_scheduled_work();
+#endif
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+       mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+       work->high_pri = MALI_FALSE;
+
+       INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+       return work;
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data)
+{
+       mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+       work->high_pri = MALI_TRUE;
+
+       INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+       return work;
+}
+
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+       _mali_osk_wq_flush();
+       kfree(work_object);
+}
+
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+       kfree(work_object);
+}
+
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+       queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+       schedule_work(&work_object->work_handle);
+#endif
+}
+
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
+{
+       mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+       queue_work(mali_wq_high, &work_object->work_handle);
+#else
+       schedule_work(&work_object->work_handle);
+#endif
+}
+
+static void _mali_osk_wq_work_func(struct work_struct *work)
+{
+       mali_osk_wq_work_object_t *work_object;
+
+       work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+#if MALI_LICENSE_IS_GPL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+       /* We want the highest dynamic priority for this thread so that jobs
+       ** depending on it can be scheduled in time. Without this, the thread
+       ** might sometimes have to wait for user-mode threads to finish their
+       ** round-robin slice, causing a *bubble* in the Mali pipeline. Thanks
+       ** to the high-priority workqueue implementation in newer kernels,
+       ** this is only needed on older kernels.
+       */
+       if (MALI_TRUE == work_object->high_pri) {
+               set_user_nice(current, -19);
+       }
+#endif
+#endif /* MALI_LICENSE_IS_GPL */
+
+       work_object->handler(work_object->data);
+}
+
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
+{
+       mali_osk_wq_delayed_work_object_t *work_object;
+
+       work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+       work_object->handler(work_object->data);
+}
+
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+       mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+       if (NULL == work) return NULL;
+
+       work->handler = handler;
+       work->data = data;
+
+       INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+       return work;
+}
+
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       kfree(work_object);
+}
+
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       cancel_delayed_work(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+       cancel_delayed_work_sync(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
+{
+       mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+       queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+       schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
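+
+/*
+ * Illustrative usage (assumed caller code with a hypothetical handler and
+ * data pointer): work items are created once and may be scheduled
+ * repeatedly; deletion flushes pending work first.
+ *
+ *   _mali_osk_wq_work_t *w = _mali_osk_wq_create_work(my_handler, my_data);
+ *   if (NULL != w) {
+ *           _mali_osk_wq_schedule_work(w);
+ *           _mali_osk_wq_delete_work(w);
+ *   }
+ */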
diff --git a/utgard/r8p0/linux/mali_pmu_power_up_down.c b/utgard/r8p0/linux/mali_pmu_power_up_down.c
new file mode 100755 (executable)
index 0000000..6a6c9f8
--- /dev/null
@@ -0,0 +1,27 @@
+/**
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/module.h>
+#include "mali_executor.h"
+
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+#ifndef CONFIG_MALI_DVFS
+       return mali_executor_set_perf_level(num_cores, MALI_TRUE);
+#else
+       return mali_executor_set_perf_level(num_cores, MALI_FALSE);
+#endif
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/utgard/r8p0/linux/mali_profiling_events.h b/utgard/r8p0/linux/mali_profiling_events.h
new file mode 100755 (executable)
index 0000000..5e51095
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/utgard/r8p0/linux/mali_profiling_gator_api.h b/utgard/r8p0/linux/mali_profiling_gator_api.h
new file mode 100755 (executable)
index 0000000..e371971
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/utgard/r8p0/linux/mali_profiling_internal.c b/utgard/r8p0/linux/mali_profiling_internal.c
new file mode 100755 (executable)
index 0000000..918caa0
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+typedef struct mali_profiling_entry {
+       u64 timestamp;
+       u32 event_id;
+       u32 data[5];
+} mali_profiling_entry;
+
+typedef enum mali_profiling_state {
+       MALI_PROFILING_STATE_UNINITIALIZED,
+       MALI_PROFILING_STATE_IDLE,
+       MALI_PROFILING_STATE_RUNNING,
+       MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_mutex_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry *profile_entries = NULL;
+static _mali_osk_atomic_t profile_insert_index;
+static u32 profile_mask = 0;
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
+                              int d2, unsigned int d3, unsigned int d4))
+{
+       add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+       profile_entries = NULL;
+       profile_mask = 0;
+       _mali_osk_atomic_init(&profile_insert_index, 0);
+
+       lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING);
+       if (NULL == lock) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       prof_state = MALI_PROFILING_STATE_IDLE;
+
+       if (MALI_TRUE == auto_start) {
+               u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+               if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_internal_profiling_term(void)
+{
+       u32 count;
+
+       /* Ensure profiling is stopped */
+       _mali_internal_profiling_stop(&count);
+
+       prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+       if (NULL != profile_entries) {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       if (NULL != lock) {
+               _mali_osk_mutex_term(lock);
+               lock = NULL;
+       }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit)
+{
+       _mali_osk_errcode_t ret;
+       mali_profiling_entry *new_profile_entries;
+
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RUNNING == prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_BUSY;
+       }
+
+       new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+       if (NULL == new_profile_entries) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
+               *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+       }
+
+       profile_mask = 1;
+       while (profile_mask <= *limit) {
+               profile_mask <<= 1;
+       }
+       profile_mask >>= 1;
+
+       *limit = profile_mask;
+
+       profile_mask--; /* turns the power of two into a mask of one less */
+
+       if (MALI_PROFILING_STATE_IDLE != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               _mali_osk_vfree(new_profile_entries);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       profile_entries = new_profile_entries;
+
+       ret = _mali_timestamp_reset();
+
+       if (_MALI_OSK_ERR_OK == ret) {
+               prof_state = MALI_PROFILING_STATE_RUNNING;
+       } else {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+       _mali_osk_mutex_signal(lock);
+       return ret;
+}
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+       u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+       profile_entries[cur_index].timestamp = _mali_timestamp_get();
+       profile_entries[cur_index].event_id = event_id;
+       profile_entries[cur_index].data[0] = data0;
+       profile_entries[cur_index].data[1] = data1;
+       profile_entries[cur_index].data[2] = data2;
+       profile_entries[cur_index].data[3] = data3;
+       profile_entries[cur_index].data[4] = data4;
+
+       /* If event is "leave API function", add current memory usage to the event
+        * as data point 4.  This is used in timeline profiling to indicate how
+        * much memory was used when leaving a function. */
+       if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+               profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+       }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
+{
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       /* Go into return state (user retrieves events); no more events will be added after this */
+       prof_state = MALI_PROFILING_STATE_RETURN;
+
+       unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+       _mali_osk_mutex_signal(lock);
+
+       tracepoint_synchronize_unregister();
+
+       *count = _mali_osk_atomic_read(&profile_insert_index);
+       if (*count > profile_mask) *count = profile_mask;
+
+       return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_internal_profiling_get_count(void)
+{
+       u32 retval = 0;
+
+       _mali_osk_mutex_wait(lock);
+       if (MALI_PROFILING_STATE_RETURN == prof_state) {
+               retval = _mali_osk_atomic_read(&profile_insert_index);
+               if (retval > profile_mask) retval = profile_mask;
+       }
+       _mali_osk_mutex_signal(lock);
+
+       return retval;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
+{
+       u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+       _mali_osk_mutex_wait(lock);
+
+       if (index < profile_mask) {
+               if ((raw_index & ~profile_mask) != 0) {
+                       index += raw_index;
+                       index &= profile_mask;
+               }
+
+               if (prof_state != MALI_PROFILING_STATE_RETURN) {
+                       _mali_osk_mutex_signal(lock);
+                       return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+               }
+
+               if (index >= raw_index) {
+                       _mali_osk_mutex_signal(lock);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               *timestamp = profile_entries[index].timestamp;
+               *event_id = profile_entries[index].event_id;
+               data[0] = profile_entries[index].data[0];
+               data[1] = profile_entries[index].data[1];
+               data[2] = profile_entries[index].data[2];
+               data[3] = profile_entries[index].data[3];
+               data[4] = profile_entries[index].data[4];
+       } else {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       _mali_osk_mutex_signal(lock);
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+       _mali_osk_mutex_wait(lock);
+
+       if (MALI_PROFILING_STATE_RETURN != prof_state) {
+               _mali_osk_mutex_signal(lock);
+               return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+       }
+
+       prof_state = MALI_PROFILING_STATE_IDLE;
+       profile_mask = 0;
+       _mali_osk_atomic_init(&profile_insert_index, 0);
+
+       if (NULL != profile_entries) {
+               _mali_osk_vfree(profile_entries);
+               profile_entries = NULL;
+       }
+
+       _mali_osk_mutex_signal(lock);
+       return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+       return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+       return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
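+
+/*
+ * Illustrative recording lifecycle (an assumed caller sequence, not part of
+ * this file):
+ *
+ *   u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ *   u32 count, i;
+ *   _mali_internal_profiling_start(&limit);
+ *   (workload runs; the registered tracepoint feeds add_event())
+ *   _mali_internal_profiling_stop(&count);
+ *   for (i = 0; i < count; i++) {
+ *           u64 ts;
+ *           u32 id, data[5];
+ *           _mali_internal_profiling_get_event(i, &ts, &id, data);
+ *   }
+ *   _mali_internal_profiling_clear();
+ */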
diff --git a/utgard/r8p0/linux/mali_profiling_internal.h b/utgard/r8p0/linux/mali_profiling_internal.h
new file mode 100755 (executable)
index 0000000..6e05ffd
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count);
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/utgard/r8p0/linux/mali_sync.c b/utgard/r8p0/linux/mali_sync.c
new file mode 100755 (executable)
index 0000000..1712cfd
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_sync.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_timeline.h"
+#include "mali_executor.h"
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <asm-generic/fcntl.h>
+
+struct mali_sync_pt {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_pt         sync_pt;
+#else
+       struct mali_internal_sync_point         sync_pt;
+#endif
+       struct mali_sync_flag *flag;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_timeline *sync_tl;  /**< Sync timeline this pt is connected to. */
+#else
+       struct mali_internal_sync_timeline *sync_tl;  /**< Sync timeline this pt is connected to. */
+#endif
+};
+
+/**
+ * The sync flag is used to connect sync fences to the Mali Timeline system.  Sync fences can be
+ * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled.
+ */
+struct mali_sync_flag {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_timeline *sync_tl;  /**< Sync timeline this flag is connected to. */
+#else
+       struct mali_internal_sync_timeline *sync_tl;  /**< Sync timeline this flag is connected to. */
+#endif
+       u32                   point;    /**< Point on timeline. */
+       int                   status;   /**< 0 if unsignaled, 1 if signaled without error or negative if signaled with error. */
+       struct kref           refcount; /**< Reference count. */
+};
+
+/**
+ * The Mali sync timeline is used to connect the Mali timeline to sync_timeline,
+ * so that more detailed Mali timeline system info can be printed when a fence times out.
+ */
+struct mali_sync_timeline_container {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       struct sync_timeline sync_timeline;
+#else
+       struct mali_internal_sync_timeline sync_timeline;
+#endif
+       struct mali_timeline *timeline;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+#else
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct mali_internal_sync_point *pt)
+#endif
+{
+       return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+#else
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct mali_internal_sync_timeline *sync_tl)
+#endif
+{
+       return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static int timeline_has_signaled(struct sync_pt *pt)
+#else
+static int timeline_has_signaled(struct mali_internal_sync_point *pt)
+#endif
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(pt);
+       mpt = to_mali_sync_pt(pt);
+
+       MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+       return mpt->flag->status;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_free_pt(struct sync_pt *pt)
+#else
+static void timeline_free_pt(struct mali_internal_sync_point *pt)
+#endif
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(pt);
+       mpt = to_mali_sync_pt(pt);
+
+       mali_sync_flag_put(mpt->flag);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_release(struct sync_timeline *sync_timeline)
+#else
+static void timeline_release(struct mali_internal_sync_timeline *sync_timeline)
+#endif
+{
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_timeline);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       /* An always-signaled timeline has no Mali timeline container attached. */
+       if (mali_tl) {
+               if (NULL != mali_tl->spinlock) {
+                       mali_spinlock_reentrant_term(mali_tl->spinlock);
+               }
+               _mali_osk_free(mali_tl);
+       }
+
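+       /* Drop the module reference taken with __module_get() when this
+        * timeline was created in mali_sync_timeline_create(). */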
+       module_put(THIS_MODULE);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+       struct mali_sync_pt *mpt, *new_mpt;
+       struct sync_pt *new_pt;
+       MALI_DEBUG_ASSERT_POINTER(pt);
+
+       mpt = to_mali_sync_pt(pt);
+
+       new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
+       if (NULL == new_pt) return NULL;
+
+       new_mpt = to_mali_sync_pt(new_pt);
+
+       mali_sync_flag_get(mpt->flag);
+       new_mpt->flag = mpt->flag;
+       new_mpt->sync_tl = mpt->sync_tl;
+
+       return new_pt;
+}
+
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+       struct mali_sync_pt *mpta;
+       struct mali_sync_pt *mptb;
+       u32 a, b;
+
+       MALI_DEBUG_ASSERT_POINTER(pta);
+       MALI_DEBUG_ASSERT_POINTER(ptb);
+       mpta = to_mali_sync_pt(pta);
+       mptb = to_mali_sync_pt(ptb);
+
+       MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+       MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+       a = mpta->flag->point;
+       b = mptb->flag->point;
+
+       if (a == b) return 0;
+
+       return ((b - a) < (a - b) ? -1 : 1);
+}
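+
+/*
+ * A worked example of the wrap-safe ordering above, assuming 32-bit points:
+ * with a = 0xFFFFFFFE and b = 1, b - a == 3 while a - b == 0xFFFFFFFD, so
+ * the compare returns -1 and a orders before b across the wrap-around; a
+ * plain a < b test would get this backwards.
+ */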
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(s);
+       MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+       mpt = to_mali_sync_pt(sync_pt);
+
+       /* It is possible this sync point is still under construction;
+        * make sure the flag is valid before accessing it.
+        */
+       if (mpt->flag) {
+               seq_printf(s, "%u", mpt->flag->point);
+       } else {
+               seq_printf(s, "uninitialized");
+       }
+}
+
+static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl)
+{
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       if (NULL != mali_tl) {
+               seq_printf(s, "oldest (%u) ", mali_tl->point_oldest);
+               seq_printf(s, "next (%u)", mali_tl->point_next);
+               seq_printf(s, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+               {
+                       u32 tid = _mali_osk_get_tid();
+                       struct mali_timeline_system *system = mali_tl->system;
+
+                       mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+                       if (!mali_tl->destroyed) {
+                               mali_spinlock_reentrant_wait(system->spinlock, tid);
+                               mali_timeline_debug_print_timeline(mali_tl, s);
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       }
+                       mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+                       /* dump job queue status and group running status */
+                       mali_executor_status_dump();
+               }
+#endif
+       }
+}
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(str);
+       MALI_DEBUG_ASSERT_POINTER(pt);
+
+       mpt = to_mali_sync_pt(pt);
+
+       /* It is possible this sync point is still under construction;
+        * make sure the flag is valid before accessing it.
+        */
+       if (mpt->flag) {
+               _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+       } else {
+               _mali_osk_snprintf(str, size, "uninitialized");
+       }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       mali_sync_tl = to_mali_sync_tl_container(timeline);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       if (NULL != mali_tl) {
+               _mali_osk_snprintf(str, size, "oldest (%u) ", mali_tl->point_oldest);
+               _mali_osk_snprintf(str, size, "next (%u)", mali_tl->point_next);
+               _mali_osk_snprintf(str, size, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+               {
+                       u32 tid = _mali_osk_get_tid();
+                       struct mali_timeline_system *system = mali_tl->system;
+
+                       mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+                       if (!mali_tl->destroyed) {
+                               mali_spinlock_reentrant_wait(system->spinlock, tid);
+                               mali_timeline_debug_direct_print_timeline(mali_tl);
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       }
+                       mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+                       /* dump job queue status and group running status */
+                       mali_executor_status_dump();
+               }
+#endif
+       }
+}
+#else
+static void timeline_print_sync_pt(struct mali_internal_sync_point *sync_pt)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+       mpt = to_mali_sync_pt(sync_pt);
+
+       if (mpt->flag) {
+               MALI_DEBUG_PRINT(2, ("mali_internal_sync_pt: %u\n", mpt->flag->point));
+       } else {
+               MALI_DEBUG_PRINT(2, ("uninitialized\n", mpt->flag->point));
+       }
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static struct sync_timeline_ops mali_timeline_ops = {
+       .driver_name    = "Mali",
+       .dup            = timeline_dup,
+       .has_signaled   = timeline_has_signaled,
+       .compare        = timeline_compare,
+       .free_pt        = timeline_free_pt,
+       .release_obj    = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+       .print_pt       = timeline_print_pt,
+       .print_obj      = timeline_print_obj,
+#else
+       .pt_value_str = timeline_pt_value_str,
+       .timeline_value_str = timeline_value_str,
+#endif
+};
+
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+       struct sync_timeline *sync_tl;
+       struct mali_sync_timeline_container *mali_sync_tl;
+
+       sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+       if (NULL == sync_tl) return NULL;
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+       mali_sync_tl->timeline = timeline;
+
+       /* Grab a reference on the module to ensure the callbacks are present
+        * as long as some timeline exists. The reference is released when the
+        * timeline is freed.
+        * Since this function is called from an ioctl on an open file we know
+        * we already have a reference, so using __module_get is safe. */
+       __module_get(THIS_MODULE);
+
+       return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+       s32 fd = -1;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+       fd = get_unused_fd();
+#else
+       fd = get_unused_fd_flags(O_CLOEXEC);
+#endif
+
+       if (fd < 0) {
+               sync_fence_put(sync_fence);
+               return -1;
+       }
+       sync_fence_install(sync_fence, fd);
+
+       return fd;
+}
+
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+       sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+       sync_fence_put(sync_fence1);
+       sync_fence_put(sync_fence2);
+
+       return sync_fence;
+}
+
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+       struct mali_sync_flag *flag;
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+       flag = mali_sync_flag_create(sync_tl, 0);
+       if (NULL == flag) return NULL;
+
+       sync_fence = mali_sync_flag_create_fence(flag);
+
+       mali_sync_flag_signal(flag, 0);
+       mali_sync_flag_put(flag);
+
+       return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+       struct mali_sync_flag *flag;
+
+       if (NULL == sync_tl) return NULL;
+
+       flag = _mali_osk_calloc(1, sizeof(*flag));
+       if (NULL == flag) return NULL;
+
+       flag->sync_tl = sync_tl;
+       flag->point = point;
+
+       flag->status = 0;
+       kref_init(&flag->refcount);
+
+       return flag;
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+       struct sync_pt *pt;
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+       if (NULL == pt) return NULL;
+
+       mali_sync_flag_get(flag);
+
+       mpt = to_mali_sync_pt(pt);
+       mpt->flag = flag;
+       mpt->sync_tl = flag->sync_tl;
+
+       return pt;
+}
+
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+       struct sync_pt    *sync_pt;
+       struct sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       sync_pt = mali_sync_flag_create_pt(flag);
+       if (NULL == sync_pt) return NULL;
+
+       sync_fence = sync_fence_create("mali_flag_fence", sync_pt);
+       if (NULL == sync_fence) {
+               sync_pt_free(sync_pt);
+               return NULL;
+       }
+
+       return sync_fence;
+}
+#else
+static struct mali_internal_sync_timeline_ops mali_timeline_ops = {
+       .driver_name    = "Mali",
+       .has_signaled   = timeline_has_signaled,
+       .free_pt        = timeline_free_pt,
+       .release_obj    = timeline_release,
+       .print_sync_pt = timeline_print_sync_pt,
+};
+
+struct mali_internal_sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+       struct mali_internal_sync_timeline *sync_tl;
+       struct mali_sync_timeline_container *mali_sync_tl;
+
+       sync_tl = mali_internal_sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+       if (NULL == sync_tl) return NULL;
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+       mali_sync_tl->timeline = timeline;
+
+       /* Grab a reference on the module to ensure the callbacks are present
+        * as long as some timeline exists. The reference is released when the
+        * timeline is freed.
+        * Since this function is called from an ioctl on an open file we know
+        * we already have a reference, so using __module_get is safe. */
+       __module_get(THIS_MODULE);
+
+       return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct mali_internal_sync_fence *sync_fence)
+{
+       s32 fd = -1;
+
+       fd = get_unused_fd_flags(0);
+
+       if (fd < 0) {
+               fput(sync_fence->file);
+               return -1;
+       }
+       fd_install(fd, sync_fence->file);
+       return fd;
+}
+
+struct mali_internal_sync_fence *mali_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2)
+{
+       struct mali_internal_sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+       MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+       sync_fence = mali_internal_sync_fence_merge(sync_fence1, sync_fence2);
+       fput(sync_fence1->file);
+       fput(sync_fence2->file);
+
+       return sync_fence;
+}
+
+struct mali_internal_sync_fence *mali_sync_timeline_create_signaled_fence(struct mali_internal_sync_timeline *sync_tl)
+{
+       struct mali_sync_flag *flag;
+       struct mali_internal_sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+       flag = mali_sync_flag_create(sync_tl, 0);
+       if (NULL == flag) return NULL;
+
+       sync_fence = mali_sync_flag_create_fence(flag);
+
+       mali_sync_flag_signal(flag, 0);
+       mali_sync_flag_put(flag);
+
+       return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct mali_internal_sync_timeline *sync_tl, mali_timeline_point point)
+{
+       struct mali_sync_flag *flag;
+
+       if (NULL == sync_tl) return NULL;
+
+       flag = _mali_osk_calloc(1, sizeof(*flag));
+       if (NULL == flag) return NULL;
+
+       flag->sync_tl = sync_tl;
+       flag->point = point;
+
+       flag->status = 0;
+       kref_init(&flag->refcount);
+
+       return flag;
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct mali_internal_sync_point *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+       struct mali_internal_sync_point *pt;
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       pt = mali_internal_sync_point_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+
+       if (pt == NULL) {
+               MALI_PRINT_ERROR(("Mali sync: sync_pt creation failed\n"));
+               return NULL;
+       }
+       mali_sync_flag_get(flag);
+
+       mpt = to_mali_sync_pt(pt);
+       mpt->flag = flag;
+       mpt->sync_tl = flag->sync_tl;
+
+       return pt;
+}
+
+struct mali_internal_sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+       struct mali_internal_sync_point    *sync_pt;
+       struct mali_internal_sync_fence *sync_fence;
+
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+       sync_pt = mali_sync_flag_create_pt(flag);
+       if (NULL == sync_pt) {
+               MALI_PRINT_ERROR(("Mali sync: sync_pt creation failed\n"));
+               return NULL;
+       }
+       sync_fence = (struct mali_internal_sync_fence *)sync_file_create(&sync_pt->base);
+       if (NULL == sync_fence) {
+               MALI_PRINT_ERROR(("Mali sync: sync_fence creation failed\n"));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+               dma_fence_put(&sync_pt->base);
+#else
+               fence_put(&sync_pt->base);
+#endif
+               return NULL;
+       }
+
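+       /* sync_file_create() holds its own reference to the fence, so the
+        * creation reference from mali_internal_sync_point_create() is
+        * dropped below. */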
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+       fence_put(&sync_pt->base);
+#else
+       dma_fence_put(&sync_pt->base);
+#endif
+
+       return sync_fence;
+}
+#endif
+
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+static void mali_sync_flag_free(struct kref *ref)
+{
+       struct mali_sync_flag *flag;
+
+       MALI_DEBUG_ASSERT_POINTER(ref);
+       flag = container_of(ref, struct mali_sync_flag, refcount);
+
+       _mali_osk_free(flag);
+}
+
+void mali_sync_flag_put(struct mali_sync_flag *flag)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+       kref_put(&flag->refcount, mali_sync_flag_free);
+}
+
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error)
+{
+       MALI_DEBUG_ASSERT_POINTER(flag);
+
+       MALI_DEBUG_ASSERT(0 == flag->status);
+       flag->status = (0 > error) ? error : 1;
+
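+       /* Make the status store above visible to other CPUs before any
+        * waiters are woken by the timeline signal below. */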
+       _mali_osk_write_mem_barrier();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       sync_timeline_signal(flag->sync_tl);
+#else
+       mali_internal_sync_timeline_signal(flag->sync_tl);
+#endif
+}
+
+
diff --git a/utgard/r8p0/linux/mali_sync.h b/utgard/r8p0/linux/mali_sync.h
new file mode 100755 (executable)
index 0000000..553dba7
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ * Mali interface for Linux sync objects.
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+
+#include <linux/seq_file.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#include <linux/sync.h>
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+#include <sync.h>
+#else
+#include "mali_internal_sync.h"
+#endif
+
+
+#include "mali_osk.h"
+
+struct mali_sync_flag;
+struct mali_timeline;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+/**
+ * Create a sync timeline.
+ *
+ * @param timeline Mali timeline to attach to the new sync timeline.
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
+
+/**
+ * Creates a file descriptor representing the sync fence.  Will release sync fence if allocation of
+ * file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences.  Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl);
+
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+#else
+/**
+ * Create a sync timeline.
+ *
+ * @param timeline Mali timeline to attach to the new sync timeline.
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct mali_internal_sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
+
+/**
+ * Creates a file descriptor representing the sync fence.  Will release sync fence if allocation of
+ * file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct mali_internal_sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences.  Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_fence_merge(struct mali_internal_sync_fence *sync_fence1, struct mali_internal_sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_timeline_create_signaled_fence(struct mali_internal_sync_timeline *sync_tl);
+
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct mali_internal_sync_timeline *sync_tl, u32 point);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct mali_internal_sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+
+#endif
+/**
+ * Grab sync flag reference.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_get(struct mali_sync_flag *flag);
+
+/**
+ * Release sync flag reference.  If this was the last reference, the sync flag will be freed.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_put(struct mali_sync_flag *flag);
+
+/**
+ * Signal sync flag.  All sync fences created from this flag will be signaled.
+ *
+ * @param flag Sync flag to signal.
+ * @param error Negative error code, or 0 if no error.
+ */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error);
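+
+/*
+ * Typical flag lifecycle (a minimal sketch; error handling and the concrete
+ * fence type for the running kernel are elided):
+ *
+ *   flag = mali_sync_flag_create(sync_tl, point);
+ *   fence = mali_sync_flag_create_fence(flag);
+ *   fd = mali_sync_fence_fd_alloc(fence);    hand the fd to user space
+ *   ...later, when the Mali timeline reaches "point"...
+ *   mali_sync_flag_signal(flag, 0);          signals every fence made from flag
+ *   mali_sync_flag_put(flag);                drop the creation reference
+ */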
+
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+#endif /* _MALI_SYNC_H_ */
diff --git a/utgard/r8p0/linux/mali_uk_types.h b/utgard/r8p0/linux/mali_uk_types.h
new file mode 100755 (executable)
index 0000000..00ba28b
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/utgard/r8p0/linux/mali_ukk_core.c b/utgard/r8p0/linux/mali_ukk_core.c
new file mode 100755 (executable)
index 0000000..62e31c9
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/slab.h>     /* memort allocation functions */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
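+/*
+ * Each wrapper below follows the same shape: copy the argument struct (or
+ * just the fields it needs) in from user space, point kargs.ctx at the
+ * session, call the matching _mali_ukk_*() core function, copy results back
+ * out, and convert the _mali_osk_errcode_t to a negative errno with
+ * map_errcode().
+ */
+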
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+       _mali_uk_get_api_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_api_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+       if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+       return 0;
+}
+
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+       _mali_uk_get_api_version_v2_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_api_version_v2(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+       if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+       return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+       _mali_uk_wait_for_notification_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_wait_for_notification(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+               kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+               if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+       } else {
+               if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+       }
+
+       return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+       _mali_uk_post_notification_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+
+       if (0 != get_user(kargs.type, &uargs->type)) {
+               return -EFAULT;
+       }
+
+       err = _mali_ukk_post_notification(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+       _mali_uk_get_user_settings_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_user_settings(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = 0; /* prevent a kernel address from being returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+       return 0;
+}
+
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+       _mali_uk_request_high_priority_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_request_high_priority(&kargs);
+
+       kargs.ctx = 0;
+
+       return map_errcode(err);
+}
+
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs)
+{
+       _mali_uk_pending_submit_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_pending_submit(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_gp.c b/utgard/r8p0/linux/mali_ukk_gp.c
new file mode 100755 (executable)
index 0000000..6909e52
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_gp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+       _mali_uk_get_gp_core_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err =  _mali_ukk_get_gp_core_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* no known transactions to roll-back */
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+       _mali_uk_gp_suspend_response_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_gp_suspend_response(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+       /* no known transactions to roll-back */
+       return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+       _mali_uk_get_gp_number_of_cores_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_gp_number_of_cores(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       /* no known transactions to roll-back */
+
+       if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_mem.c b/utgard/r8p0/linux/mali_ukk_mem.c
new file mode 100755 (executable)
index 0000000..440b477
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs)
+{
+       _mali_uk_alloc_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_alloc_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_allocate(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               MALI_PRINT_ERROR(("_mali_ukk_mem_allocate return ERROR\n"));
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs)
+{
+       _mali_uk_free_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_free_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_free(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs)
+{
+       _mali_uk_bind_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_bind_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_bind(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs)
+{
+       _mali_uk_unbind_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_unbind_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_unbind(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs)
+{
+       _mali_uk_cow_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_cow(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs)
+{
+       _mali_uk_cow_modify_range_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_cow_modify_range(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) {
+               return -EFAULT;
+       }
+       return 0;
+}
+
+
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs)
+{
+       _mali_uk_mem_resize_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_resize_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_resize(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs)
+{
+       _mali_uk_mem_write_safe_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+
+       /* Check if we can access the buffers */
+       if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+           || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+               return -EINVAL;
+       }
+
+       /* Check if size wraps */
+       if ((kargs.size + kargs.dest) <= kargs.dest
+           || (kargs.size + kargs.src) <= kargs.src) {
+               return -EINVAL;
+       }
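+       /* Example of what the check above rejects, assuming 64-bit fields:
+        * dest = 0xFFFFFFFFFFFFF000 with size = 0x2000 wraps around to 0x1000,
+        * which is <= dest; size == 0 is rejected too, as the sum equals dest. */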
+
+       err = _mali_ukk_mem_write_safe(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.size, &uargs->size)) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs)
+{
+       _mali_uk_query_mmu_page_table_dump_size_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+       return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs)
+{
+       _mali_uk_dump_mmu_page_table_s kargs;
+       _mali_osk_errcode_t err;
+       void __user *user_buffer;
+       void *buffer = NULL;
+       int rc = -EFAULT;
+
+       /* validate input */
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       /* the session_data pointer was validated by caller */
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+               goto err_exit;
+
+       user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+       if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+               goto err_exit;
+
+       /* allocate temporary buffer (kernel side) to store mmu page table info */
+       if (kargs.size <= 0)
+               return -EINVAL;
+       /* Allow at most 8MiB buffers; this is more than enough to dump a
+        * fully populated page table. */
+       if (kargs.size > SZ_8M)
+               return -EINVAL;
+
+       buffer = (void *)(uintptr_t)_mali_osk_valloc(kargs.size);
+       if (NULL == buffer) {
+               rc = -ENOMEM;
+               goto err_exit;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.buffer = (uintptr_t)buffer;
+       err = _mali_ukk_dump_mmu_page_table(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               rc = map_errcode(err);
+               goto err_exit;
+       }
+
+       /* copy mmu page table info back to user space and update pointers */
+       if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+               goto err_exit;
+
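+       /* register_writes and page_table_dump point into the kernel-side
+        * buffer; rebase them to the matching offsets in the user buffer
+        * before handing the struct back. */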
+       kargs.register_writes = kargs.register_writes -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+       kargs.page_table_dump = kargs.page_table_dump -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+       if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+               goto err_exit;
+
+       rc = 0;
+
+err_exit:
+       if (buffer) _mali_osk_vfree(buffer);
+       return rc;
+}
+
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+       _mali_uk_profiling_memory_usage_get_s kargs;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_mem_usage_get(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
diff --git a/utgard/r8p0/linux/mali_ukk_pp.c b/utgard/r8p0/linux/mali_ukk_pp.c
new file mode 100755 (executable)
index 0000000..17ac889
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_pp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+
+       /* If the jobs were started successfully, 0 is returned.  If there was an error, but the
+        * jobs were started, we return -ENOENT.  For anything else returned, the jobs were not
+        * started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       err = _mali_ukk_pp_and_gp_start_job(session_data, uargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+       _mali_uk_get_pp_number_of_cores_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_get_pp_number_of_cores(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+       _mali_uk_get_pp_core_version_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_pp_core_version(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       return 0;
+}
+
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+       _mali_uk_pp_disable_wb_s kargs;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       _mali_ukk_pp_job_disable_wb(&kargs);
+
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_profiling.c b/utgard/r8p0/linux/mali_ukk_profiling.c
new file mode 100755 (executable)
index 0000000..1c82611
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+       _mali_uk_profiling_add_event_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_profiling_add_event(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+       _mali_uk_sw_counters_report_s kargs;
+       _mali_osk_errcode_t err;
+       u32 *counter_buffer;
+       u32 __user *counters;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
+               return -EFAULT;
+       }
+
+       /* make sure that kargs.num_counters is [at least somewhat] sane */
+       if (kargs.num_counters > 10000) {
+               MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+               return -EINVAL;
+       }
+
+       counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+       if (NULL == counter_buffer) {
+               return -ENOMEM;
+       }
+
+       counters = (u32 __user *)(uintptr_t)kargs.counters;
+
+       if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
+               kfree(counter_buffer);
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.counters = (uintptr_t)counter_buffer;
+
+       err = _mali_ukk_sw_counters_report(&kargs);
+
+       kfree(counter_buffer);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
+
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs)
+{
+       _mali_uk_profiling_stream_fd_get_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_profiling_stream_fd_get(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs)
+{
+       _mali_uk_profiling_control_set_s kargs;
+       _mali_osk_errcode_t err;
+       u8 *kernel_control_data = NULL;
+       u8 *kernel_response_data = NULL;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT;
+       if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+
+
+       /* Sanity check about the size */
+       if (kargs.control_packet_size > PAGE_SIZE || kargs.response_packet_size > PAGE_SIZE)
+               return -EINVAL;
+
+       if (0 != kargs.control_packet_size) {
+
+               if (0 == kargs.response_packet_size)
+                       return -EINVAL;
+
+               kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
+               if (NULL == kernel_control_data) {
+                       return -ENOMEM;
+               }
+
+               kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
+               if (NULL == kernel_response_data) {
+                       _mali_osk_free(kernel_control_data);
+                       return -ENOMEM;
+               }
+
+               kargs.control_packet_data = (uintptr_t)kernel_control_data;
+               kargs.response_packet_data = (uintptr_t)kernel_response_data;
+
+               if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
+
+               err = _mali_ukk_profiling_control_set(&kargs);
+               if (_MALI_OSK_ERR_OK != err) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return map_errcode(err);
+               }
+
+               if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
+
+               if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
+
+               _mali_osk_free(kernel_control_data);
+               _mali_osk_free(kernel_response_data);
+       } else {
+
+               err = _mali_ukk_profiling_control_set(&kargs);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return map_errcode(err);
+               }
+
+       }
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_soft_job.c b/utgard/r8p0/linux/mali_ukk_soft_job.c
new file mode 100755 (executable)
index 0000000..5d700ea
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_soft_job.h"
+#include "mali_timeline.h"
+
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
+{
+       _mali_uk_soft_job_start_s kargs;
+       u32 type, point;
+       u64 user_job;
+       struct mali_timeline_fence fence;
+       struct mali_soft_job *job = NULL;
+       u32 __user *job_id_ptr = NULL;
+
+       /* If the job was started successfully, 0 is returned.  If there was an error, but the job
+        * was started, we return -ENOENT.  For anything else returned, the job was not started. */
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session, -EINVAL);
+
+       MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+               return -EFAULT;
+       }
+
+       type = kargs.type;
+       user_job = kargs.user_job;
+       job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
+
+       mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
+
+       if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
+               MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
+               return -EINVAL;
+       }
+
+       /* Create soft job. */
+       job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job);
+       if (unlikely(NULL == job)) {
+               return map_errcode(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Write job id back to user space. */
+       if (0 != put_user(job->id, job_id_ptr)) {
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id"));
+               mali_soft_job_destroy(job);
+               return map_errcode(_MALI_OSK_ERR_NOMEM);
+       }
+
+       /* Start soft job. */
+       point = mali_soft_job_start(job, &fence);
+
+       if (0 != put_user(point, &uargs->point)) {
+               /* Let user space know that something failed after the job was started. */
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs)
+{
+       u32 job_id;
+       _mali_osk_errcode_t err;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT;
+
+       err = mali_soft_job_system_signal_job(session->soft_job_system, job_id);
+
+       return map_errcode(err);
+}
diff --git a/utgard/r8p0/linux/mali_ukk_timeline.c b/utgard/r8p0/linux/mali_ukk_timeline.c
new file mode 100755 (executable)
index 0000000..8670d04
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_timeline.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs)
+{
+       u32 val;
+       mali_timeline_id timeline;
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != get_user(val, &uargs->timeline)) return -EFAULT;
+
+       if (MALI_UK_TIMELINE_MAX <= val) {
+               return -EINVAL;
+       }
+
+       timeline = (mali_timeline_id)val;
+
+       point = mali_timeline_system_get_latest_point(session->timeline_system, timeline);
+
+       if (0 != put_user(point, &uargs->point)) return -EFAULT;
+
+       return 0;
+}
+
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs)
+{
+       u32 timeout, status;
+       mali_bool ret;
+       _mali_uk_fence_t uk_fence;
+       struct mali_timeline_fence fence;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+       if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT;
+
+       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+       ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
+       status = (MALI_TRUE == ret ? 1 : 0);
+
+       if (0 != put_user(status, &uargs->status)) return -EFAULT;
+
+       return 0;
+}
+
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs)
+{
+       s32 sync_fd = -1;
+       _mali_uk_fence_t uk_fence;
+       struct mali_timeline_fence fence;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+       sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
+#else
+       sync_fd = -1;
+#endif /* defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) */
+
+       if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT;
+
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_vsync.c b/utgard/r8p0/linux/mali_ukk_vsync.c
new file mode 100755 (executable)
index 0000000..e3f1d32
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/fs.h>       /* file system operations */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+       _mali_uk_vsync_event_report_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_vsync_event_report(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
diff --git a/utgard/r8p0/linux/mali_ukk_wrappers.h b/utgard/r8p0/linux/mali_ukk_wrappers.h
new file mode 100755 (executable)
index 0000000..7e59346
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs);
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs);
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs);
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs);
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs);
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs);
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs);
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs);
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs);
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+
+int map_errcode(_mali_osk_errcode_t err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/utgard/r8p0/readme.txt b/utgard/r8p0/readme.txt
new file mode 100755 (executable)
index 0000000..6785ac9
--- /dev/null
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+    kdir_path: Path to your Linux Kernel directory
+    ump_option: 1 = enable UMP support (*)
+                0 = disable UMP support
+    build_option: debug = debug build of driver
+                  release = release build of driver
+
+(*)  For newer Linux Kernels, the Module.symvers file for the UMP device driver
+     must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+     point to this file. This file is generated when the UMP driver is built.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
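+
+For example (the paths and option values below are illustrative, not
+mandated by the driver):
+
+    KDIR=/lib/modules/$(uname -r)/build USING_UMP=0 BUILD=release make
+    insmod mali.ko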
+
+Use of UMP is not recommended; it has been superseded by the dma-buf API in
+the Linux kernel. The Mali Device Driver is built with dma-buf support when
+dma-buf is enabled in the kernel config.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
diff --git a/utgard/r8p0/regs/mali_200_regs.h b/utgard/r8p0/regs/mali_200_regs.h
new file mode 100755 (executable)
index 0000000..c904ad2
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ *  Enum for management register addresses.
+ */
+enum mali200_mgmt_reg {
+       MALI200_REG_ADDR_MGMT_VERSION                              = 0x1000,
+       MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR               = 0x1004,
+       MALI200_REG_ADDR_MGMT_STATUS                               = 0x1008,
+       MALI200_REG_ADDR_MGMT_CTRL_MGMT                            = 0x100c,
+
+       MALI200_REG_ADDR_MGMT_INT_RAWSTAT                          = 0x1020,
+       MALI200_REG_ADDR_MGMT_INT_CLEAR                            = 0x1024,
+       MALI200_REG_ADDR_MGMT_INT_MASK                             = 0x1028,
+       MALI200_REG_ADDR_MGMT_INT_STATUS                           = 0x102c,
+
+       MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS                     = 0x1050,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE                    = 0x1080,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC                       = 0x1084,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT                     = 0x1088,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE                     = 0x108c,
+
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE                    = 0x10a0,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC                       = 0x10a4,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE                     = 0x10ac,
+
+       MALI200_REG_ADDR_MGMT_PERFMON_CONTR                        = 0x10b0,
+       MALI200_REG_ADDR_MGMT_PERFMON_BASE                         = 0x10b4,
+
+       MALI200_REG_SIZEOF_REGISTER_BANK                           = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+       MALI200_REG_VAL_CTRL_MGMT_STOP_BUS         = (1 << 0),
+       MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES     = (1 << 3),
+       MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET      = (1 << 5),
+       MALI200_REG_VAL_CTRL_MGMT_START_RENDERING  = (1 << 6),
+       MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET     = (1 << 7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+       MALI200_REG_VAL_IRQ_END_OF_FRAME          = (1 << 0),
+       MALI200_REG_VAL_IRQ_END_OF_TILE           = (1 << 1),
+       MALI200_REG_VAL_IRQ_HANG                  = (1 << 2),
+       MALI200_REG_VAL_IRQ_FORCE_HANG            = (1 << 3),
+       MALI200_REG_VAL_IRQ_BUS_ERROR             = (1 << 4),
+       MALI200_REG_VAL_IRQ_BUS_STOP              = (1 << 5),
+       MALI200_REG_VAL_IRQ_CNT_0_LIMIT           = (1 << 6),
+       MALI200_REG_VAL_IRQ_CNT_1_LIMIT           = (1 << 7),
+       MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR  = (1 << 8),
+       MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW  = (1 << 10),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW   = (1 << 11),
+       MALI400PP_REG_VAL_IRQ_RESET_COMPLETED       = (1 << 12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL  ((enum mali200_mgmt_irq) (\
+                                      MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+                                      MALI200_REG_VAL_IRQ_END_OF_TILE                            |\
+                                      MALI200_REG_VAL_IRQ_HANG                                   |\
+                                      MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+                                      MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+                                      MALI200_REG_VAL_IRQ_BUS_STOP                               |\
+                                      MALI200_REG_VAL_IRQ_CNT_0_LIMIT                            |\
+                                      MALI200_REG_VAL_IRQ_CNT_1_LIMIT                            |\
+                                      MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+                                      MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW                    |\
+                                      MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+                                      MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+                                      MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+                                      MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+                                      MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+                                      MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
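+
+/* Editor's sketch (not part of the original patch): a typical interrupt
+ * handler reads the raw status, keeps only the interrupts the driver uses,
+ * and acknowledges them through INT_CLEAR.  Assumes readl()/writel() from
+ * <linux/io.h> and an ioremapped register bank in "base". */
+static inline u32 mali200_ack_used_irqs(void __iomem *base)
+{
+       u32 stat = readl(base + MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+
+       stat &= MALI200_REG_VAL_IRQ_MASK_USED;
+       writel(stat, base + MALI200_REG_ADDR_MGMT_INT_CLEAR);
+       return stat;
+}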
+
+enum mali200_mgmt_status {
+       MALI200_REG_VAL_STATUS_RENDERING_ACTIVE     = (1 << 0),
+       MALI200_REG_VAL_STATUS_BUS_STOPPED          = (1 << 4),
+};
+
+enum mali200_render_unit {
+       MALI200_REG_ADDR_FRAME = 0x0000,
+       MALI200_REG_ADDR_RSW   = 0x0004,
+       MALI200_REG_ADDR_STACK = 0x0030,
+       MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+       MALI200_REG_ADDR_ORIGIN_OFFSET_X  = 0x0040
+};
+
+enum mali200_wb_unit {
+       MALI200_REG_ADDR_WB0 = 0x0100,
+       MALI200_REG_ADDR_WB1 = 0x0200,
+       MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+       MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+       MALI200_REG_ADDR_WB_SOURCE_ADDR   = 0x0004,
+};
+
+/* This should be in the top 16 bits of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+#define MALI470_PP_PRODUCT_ID 0xCF08
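+
+/* Editor's sketch (not part of the original patch): the product ID sits in
+ * the top 16 bits of the VERSION register, so a raw read decodes as below. */
+static inline u32 mali200_pp_product_id(u32 version)
+{
+       return version >> 16; /* e.g. 0xCF07 for a Mali-450 PP core */
+}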
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/utgard/r8p0/regs/mali_gp_regs.h b/utgard/r8p0/regs/mali_gp_regs.h
new file mode 100755 (executable)
index 0000000..435953d
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * They are used to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are byte offsets to the 32-bit registers.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR           = 0x00,
+       MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR             = 0x04,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR         = 0x08,
+       MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR           = 0x0c,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR     = 0x10,
+       MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR       = 0x14,
+       MALIGP2_REG_ADDR_MGMT_CMD                       = 0x20,
+       MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT               = 0x24,
+       MALIGP2_REG_ADDR_MGMT_INT_CLEAR                 = 0x28,
+       MALIGP2_REG_ADDR_MGMT_INT_MASK                  = 0x2C,
+       MALIGP2_REG_ADDR_MGMT_INT_STAT                  = 0x30,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE         = 0x3C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE         = 0x40,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC            = 0x44,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC            = 0x48,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE          = 0x4C,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE          = 0x50,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT          = 0x54,
+       MALIGP2_REG_ADDR_MGMT_STATUS                    = 0x68,
+       MALIGP2_REG_ADDR_MGMT_VERSION                   = 0x6C,
+       MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ      = 0x80,
+       MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ     = 0x84,
+       MALIGP2_CONTR_AXI_BUS_ERROR_STAT                = 0x94,
+       MALIGP2_REGISTER_ADDRESS_SPACE_SIZE             = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_CMD
+ */
+typedef enum {
+       MALIGP2_REG_VAL_CMD_START_VS              = (1 << 0),
+       MALIGP2_REG_VAL_CMD_START_PLBU            = (1 << 1),
+       MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC     = (1 << 4),
+       MALIGP2_REG_VAL_CMD_RESET                 = (1 << 5),
+       MALIGP2_REG_VAL_CMD_FORCE_HANG            = (1 << 6),
+       MALIGP2_REG_VAL_CMD_STOP_BUS              = (1 << 9),
+       MALI400GP_REG_VAL_CMD_SOFT_RESET          = (1 << 10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/**  @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ *       MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG                (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG          (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR          (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS  (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+       (\
+        MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+        MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+        MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+        MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          | \
+        MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        | \
+        MALIGP2_REG_VAL_IRQ_HANG                | \
+        MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+        MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    | \
+        MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    | \
+        MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+        MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+        MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+        MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     | \
+        MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+        MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+        MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+        MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+       (\
+        MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+        MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+        MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+        MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+        MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+        MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+        MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+        MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+        MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+        MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining no IRQs on MaliGP2 */
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different status values of the geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE         0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED       0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE       0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR         0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR   0x0100
+/** @} defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+               MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+               MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+               MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+               MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
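+
+/* Editor's sketch (not part of the original patch): with the masks above, a
+ * raw STATUS read can be split into "still rendering" and "faulted" tests. */
+static inline int maligp2_status_active(u32 status)
+{
+       return 0 != (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE);
+}
+
+static inline int maligp2_status_error(u32 status)
+{
+       return 0 != (status & MALIGP2_REG_VAL_STATUS_MASK_ERROR);
+}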
+
+/* This should be in the top 16 bits of the version register of the GP. */
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different instrumentation sources on the geometry processor.
+ *  @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+       MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/utgard/r8p0/timestamp-arm11-cc/mali_timestamp.c b/utgard/r8p0/timestamp-arm11-cc/mali_timestamp.c
new file mode 100755 (executable)
index 0000000..f7f7baf
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/utgard/r8p0/timestamp-arm11-cc/mali_timestamp.h b/utgard/r8p0/timestamp-arm11-cc/mali_timestamp.h
new file mode 100755 (executable)
index 0000000..757f643
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       /*
+        * reset counters and overflow flags
+        */
+
+       u32 mask = (1 << 0) | /* enable all three counters */
+                  (0 << 1) | /* reset both Count Registers to 0x0 */
+                  (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+                  (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+                  (0 << 4) | /* Count Register 0 interrupt enable */
+                  (0 << 5) | /* Count Register 1 interrupt enable */
+                  (0 << 6) | /* Cycle Counter interrupt enable */
+                  (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+                  (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+                  (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+       __asm__ __volatile__("MCR    p15, 0, %0, c15, c12, 0" : : "r"(mask));
+
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       u32 result;
+
+       /* this is for the clock cycles */
+       __asm__ __volatile__("MRC    p15, 0, %0, c15, c12, 1" : "=r"(result));
+
+       return (u64)result;
+}
+
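+/* Editor's sketch (not part of the original patch): typical use pairs a reset
+ * with two reads around the section of interest.  The value returned by
+ * _mali_timestamp_get() is a 32-bit cycle count widened to u64, so it wraps
+ * on long sections; overflow-flag handling is omitted here. */
+MALI_STATIC_INLINE u64 _mali_timestamp_measure_sketch(void (*workload)(void))
+{
+       u64 start, end;
+
+       _mali_timestamp_reset();
+       start = _mali_timestamp_get();
+       workload();             /* code section being measured */
+       end = _mali_timestamp_get();
+
+       return end - start;     /* elapsed processor clock cycles */
+}
+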
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/utgard/r8p0/timestamp-default/mali_timestamp.c b/utgard/r8p0/timestamp-default/mali_timestamp.c
new file mode 100755 (executable)
index 0000000..f7f7baf
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_timestamp.h */
diff --git a/utgard/r8p0/timestamp-default/mali_timestamp.h b/utgard/r8p0/timestamp-default/mali_timestamp.h
new file mode 100755 (executable)
index 0000000..21700fe
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved.
+ * 
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ * 
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+       return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+       return _mali_osk_boot_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */