import OT_8063_20170412 mali driver
author	Stricted <info@stricted.net>
Fri, 22 Feb 2019 12:20:20 +0000 (12:20 +0000)
committer	Stricted <info@stricted.net>
Fri, 22 Feb 2019 12:20:20 +0000 (12:20 +0000)
246 files changed:
drivers/misc/mediatek/gpu/ged/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/ged/include/ged_base.h
drivers/misc/mediatek/gpu/ged/include/ged_bridge.h
drivers/misc/mediatek/gpu/ged/include/ged_dvfs.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/include/ged_error.h
drivers/misc/mediatek/gpu/ged/include/ged_hal.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/include/ged_log.h
drivers/misc/mediatek/gpu/ged/include/ged_mm.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/include/ged_monitor_3D_fence.h
drivers/misc/mediatek/gpu/ged/include/ged_notify_sw_vsync.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/include/ged_profile_dvfs.h
drivers/misc/mediatek/gpu/ged/include/ged_thread.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/include/ged_type.h
drivers/misc/mediatek/gpu/ged/src/ged_base.c
drivers/misc/mediatek/gpu/ged/src/ged_bridge.c
drivers/misc/mediatek/gpu/ged/src/ged_debugFS.c
drivers/misc/mediatek/gpu/ged/src/ged_dvfs.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/src/ged_hal.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/src/ged_hashtable.c
drivers/misc/mediatek/gpu/ged/src/ged_log.c
drivers/misc/mediatek/gpu/ged/src/ged_main.c
drivers/misc/mediatek/gpu/ged/src/ged_mm.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/src/ged_monitor_3D_fence.c
drivers/misc/mediatek/gpu/ged/src/ged_notify_sw_vsync.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/src/ged_profile_dvfs.c
drivers/misc/mediatek/gpu/ged/src/ged_thread.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/ged/src/ged_thread.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/hal/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/hal/mtk_gpu_utility.c
drivers/misc/mediatek/gpu/hal/mtk_gpu_utility.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/Kbuild [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/Kbuild-mtk-custom-env [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/Kbuild-mtk-custom-src [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/Kconfig [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_broadcast.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_broadcast.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dlbu.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dlbu.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_job.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_job.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_group.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_group.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_hw_core.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_hw_core.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_common.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_core.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_core.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_utilization.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_utilization.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_vsync.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_l2_cache.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_l2_cache.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mem_validation.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mem_validation.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mmu.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mmu.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mmu_page_directory.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_mmu_page_directory.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk_bitops.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk_list.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk_mali.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk_profiling.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_osk_types.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pm.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pm.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pm_domain.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pm_domain.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pmu.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pmu.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_job.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_job.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_scheduler.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_scheduler.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_scheduler_types.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_session.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_session.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_soft_job.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_soft_job.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_spinlock_reentrant.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_spinlock_reentrant.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline_fence_wait.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline_fence_wait.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline_sync_fence.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_timeline_sync_fence.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_ukk.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_user_settings_db.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_user_settings_db.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard_counters.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard_ioctl.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard_profiling_events.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard_profiling_gator_api.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/include/linux/mali/mali_utgard_uk_types.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/license/gpl/mali_kernel_license.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/license/proprietary/mali_kernel_license.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_device_pause_resume.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_kernel_linux.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_kernel_linux.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_kernel_sysfs.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_kernel_sysfs.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_linux_trace.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_block_alloc.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_block_alloc.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_dma_buf.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_dma_buf.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_external.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_external.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_os_alloc.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_os_alloc.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_types.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_ump.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_ump.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_atomics.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_irq.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_locks.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_locks.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_low_level_mem.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_mali.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_math.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_memory.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_misc.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_notification.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_pm.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_profiling.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_specific.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_time.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_timers.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_wait_queue.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_osk_wq.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_pmu_power_up_down.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_profiling_events.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_profiling_gator_api.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_profiling_internal.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_profiling_internal.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_sync.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_sync.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_uk_types.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_core.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_gp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_mem.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_pp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_profiling.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_soft_job.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_timeline.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_vsync.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_ukk_wrappers.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/mtk_common/mtk_debug.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/mtk_common/mtk_mem_record.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/mtk_common/mtk_mem_record.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/mtk_common/mtk_pp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/mtk_common/mtk_pp.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/arm/arm.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/arm/arm_core_scaling.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/arm/arm_core_scaling.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/arm_core_scaling.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/arm_core_scaling.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/platform.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/platform_met.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/platform_pmm.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/platform/platform_pmm.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/readme.txt [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/mali/regs/mali_200_regs.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/regs/mali_gp_regs.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/timestamp-arm11-cc/mali_timestamp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/timestamp-arm11-cc/mali_timestamp.h
drivers/misc/mediatek/gpu/mt8127/mali/mali/timestamp-default/mali_timestamp.c
drivers/misc/mediatek/gpu/mt8127/mali/mali/timestamp-default/mali_timestamp.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/Kbuild [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/Kconfig [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/Makefile [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/Makefile.common [changed mode: 0755->0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/arch-default/config.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/arch-pb-virtex5/config.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_api.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_common.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_common.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_descriptor_mapping.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_descriptor_mapping.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_memory_backend.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_ref_drv.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_kernel_types.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_osk.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_uk_types.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_ukk.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/license/gpl/ump_kernel_license.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/license/proprietary/ump_kernel_license.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_ioctl.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_linux.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_linux.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_memory_backend_dedicated.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_memory_backend_dedicated.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_memory_backend_os.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_memory_backend_os.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.c [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.h [new file with mode: 0644]
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_memory_backend.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_osk_atomics.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_osk_low_level_mem.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_osk_misc.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_ukk_ref_wrappers.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_ukk_ref_wrappers.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_ukk_wrappers.c
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_ukk_wrappers.h
drivers/misc/mediatek/gpu/mt8127/mali/ump/readme.txt [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/umplock/Makefile [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_driver.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_ioctl.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_kernel.c
drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_kernel.h
drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.c [deleted file]
drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.h [deleted file]
drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_memory_if.c

old mode 100755 (executable)
new mode 100644 (file)
index 2b787f2..0e9cd4d
@@ -1,17 +1,29 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
 GED_TOP = $(srctree)/drivers/misc/mediatek/gpu/ged
+MTK_TOP = $(srctree)/drivers/misc/mediatek/
 
 VER:=eng
 
 ifneq ($(strip $(TARGET_BUILD_VARIANT)),)
 ifneq ($(strip $(TARGET_BUILD_VARIANT)),eng)
 VER:=user
+ccflags-y += -DGED_LOG_SIZE_LIMITED
 endif
 endif
 
 ccflags-y += \
-       -I$(GED_TOP)/include/
+       -I$(srctree)/include/ \
+    -I$(MTK_TOP)/include/ \
+    -I$(GED_TOP)/include/ \
+    -I$(MTK_TOP)/base/power/$(subst ",,$(CONFIG_MTK_PLATFORM))/
+       
+$(info $$CONFIG_MTK_GPU_COMMON_DVFS_SUPPORT is [${CONFIG_MTK_GPU_COMMON_DVFS_SUPPORT}])
+    
+ifeq ($(CONFIG_MTK_GPU_COMMON_DVFS_SUPPORT),y)
+ccflags-y += -DGED_DVFS_ENABLE \
+             -DENABLE_COMMON_DVFS \
+            -DENABLE_TIMER_BACKUP \
+             -DGED_DVFS_DEBUG_BUF
+endif
 
 ccflags-y += \
        -include $(GED_TOP)/include/config_kernel_$(VER).h
@@ -23,7 +35,12 @@ ged-y += \
        src/ged_main.o \
        src/ged_debugFS.o \
        src/ged_log.o \
+       src/ged_hal.o \
        src/ged_bridge.o \
        src/ged_profile_dvfs.o \
        src/ged_monitor_3D_fence.o \
-       src/ged_hashtable.o
+       src/ged_notify_sw_vsync.o \
+       src/ged_hashtable.o \
+       src/ged_thread.o \
+       src/ged_dvfs.o
+
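Note: with CONFIG_MTK_GPU_COMMON_DVFS_SUPPORT=y, the Kbuild fragment above adds -DGED_DVFS_ENABLE (and friends) to every GED object. A minimal sketch of how a source file gates on that define, mirroring the #ifdef GED_DVFS_ENABLE guards in ged_dvfs.c further down; example_cur_freq_idx() is a hypothetical helper, not part of the driver:

#ifdef GED_DVFS_ENABLE
#include <mt_gpufreq.h>	/* platform DVFS table API, as included by ged_dvfs.c */
#endif

static unsigned int example_cur_freq_idx(void)
{
#ifdef GED_DVFS_ENABLE
	/* real path: ask the platform gpufreq layer for the current index */
	return mt_gpufreq_get_cur_freq_index();
#else
	return 0;	/* common DVFS not compiled in */
#endif
}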
index 1e4d1930d58461b2c1cc337d378f2de0a6886e58..4a5bc839f9b1ce8946c5ba60a07dca85cb51d3c0 100644 (file)
@@ -1,15 +1,15 @@
 #ifndef __GED_BASE_H__
 #define __GED_BASE_H__
 
-#include <linux/xlog.h>
+#include <linux/compiler.h>
 #include "ged_type.h"
 
 #ifdef GED_DEBUG
-#define GED_LOGI(...)  xlog_printk(ANDROID_LOG_INFO, "GED", __VA_ARGS__)
+#define GED_LOGI(...)  pr_debug("GED:" __VA_ARGS__)
 #else
 #define GED_LOGI(...)
 #endif
-#define GED_LOGE(...)  xlog_printk(ANDROID_LOG_ERROR, "GED", __VA_ARGS__)
+#define GED_LOGE(...)  pr_debug("GED:" __VA_ARGS__)
 #define GED_CONTAINER_OF(ptr, type, member) ((type *)( ((char *)ptr) - offsetof(type,member) ))
 
 unsigned long ged_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes);
@@ -18,8 +18,12 @@ unsigned long ged_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned
 
 void* ged_alloc(int i32Size);
 
+void* ged_alloc_atomic(int i32Size);
+
 void ged_free(void* pvBuf, int i32Size);
 
 long ged_get_pid(void);
 
+unsigned long long ged_get_time(void);
+
 #endif
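Note: GED_CONTAINER_OF recovers the enclosing structure from a pointer to one of its members, mirroring the kernel's container_of(). A minimal sketch with a hypothetical node type:

#include <linux/list.h>
#include "ged_base.h"

typedef struct EXAMPLE_NODE_TAG {	/* hypothetical container */
	int i32Value;
	struct list_head sEntry;
} EXAMPLE_NODE;

static EXAMPLE_NODE *example_node_of(struct list_head *psEntry)
{
	/* step back from the embedded member to its enclosing struct */
	return GED_CONTAINER_OF(psEntry, EXAMPLE_NODE, sEntry);
}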
index 5281cf646077d66f2319c68385799a88a6e18924..74c2843f935bde414db999c65f6fb0f60649fe4d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "ged_base.h"
 #include "ged_log.h"
+#include "ged_type.h"
 
 typedef struct _GED_BRIDGE_PACKAGE
 {
@@ -26,16 +27,20 @@ typedef struct _GED_BRIDGE_PACKAGE
 #define GED_IOWR(INDEX)  _IOWR(GED_MAGIC, INDEX, GED_BRIDGE_PACKAGE)
 #define GED_GET_BRIDGE_ID(X)   _IOC_NR(X)
 
-/*****************************************************************************
+/******************************************************************************
  *  IOCTL Commands
- *****************************************************************************/
+ ******************************************************************************/
 typedef enum
 {
     GED_BRIDGE_COMMAND_LOG_BUF_GET,
     GED_BRIDGE_COMMAND_LOG_BUF_WRITE,
     GED_BRIDGE_COMMAND_LOG_BUF_RESET,
     GED_BRIDGE_COMMAND_BOOST_GPU_FREQ,
-    GED_BRIDGE_COMMAND_MONITOR_3D_FENCE
+    GED_BRIDGE_COMMAND_MONITOR_3D_FENCE,
+    GED_BRIDGE_COMMAND_QUERY_INFO,
+    GED_BRIDGE_COMMAND_NOTIFY_VSYNC,
+    GED_BRIDGE_COMMAND_DVFS_PROBE,
+    GED_BRIDGE_COMMAND_DVFS_UM_RETURN,
 } GED_BRIDGE_COMMAND_ID;
 
 #define GED_BRIDGE_IO_LOG_BUF_GET                      GED_IOWR(GED_BRIDGE_COMMAND_LOG_BUF_GET)
@@ -43,6 +48,10 @@ typedef enum
 #define GED_BRIDGE_IO_LOG_BUF_RESET                    GED_IOWR(GED_BRIDGE_COMMAND_LOG_BUF_RESET)
 #define GED_BRIDGE_IO_BOOST_GPU_FREQ           GED_IOWR(GED_BRIDGE_COMMAND_BOOST_GPU_FREQ)
 #define GED_BRIDGE_IO_MONITOR_3D_FENCE      GED_IOWR(GED_BRIDGE_COMMAND_MONITOR_3D_FENCE)
+#define GED_BRIDGE_IO_QUERY_INFO      GED_IOWR(GED_BRIDGE_COMMAND_QUERY_INFO)
+#define GED_BRIDGE_IO_NOTIFY_VSYNC       GED_IOWR(GED_BRIDGE_COMMAND_NOTIFY_VSYNC)
+#define GED_BRIDGE_IO_DVFS_PROBE       GED_IOWR(GED_BRIDGE_COMMAND_DVFS_PROBE)
+#define GED_BRIDGE_IO_DVFS_UM_RETURN GED_IOWR(GED_BRIDGE_COMMAND_DVFS_UM_RETURN)
 
 /*****************************************************************************
  *  LOG_BUF_GET
@@ -66,12 +75,13 @@ typedef struct GED_BRIDGE_OUT_LOGBUFGET_TAG
  *  LOG_BUF_WRITE
  *****************************************************************************/
 
-#define GED_BRIDGE_IN_LOGBUF_SIZE 512
+#define GED_BRIDGE_IN_LOGBUF_SIZE 320
 
 /* Bridge in structure for LOG_BUF_WRITE */
 typedef struct GED_BRIDGE_IN_LOGBUFWRITE_TAG
 {
     GED_LOG_BUF_HANDLE hLogBuf;
+    int attrs;
     char acLogBuf[GED_BRIDGE_IN_LOGBUF_SIZE];
 } GED_BRIDGE_IN_LOGBUFWRITE;
 
@@ -128,12 +138,78 @@ typedef struct GED_BRIDGE_IN_MONITOR3DFENCE_TAG
     int fd;
 } GED_BRIDGE_IN_MONITOR3DFENCE;
 
-/* Bridge out structure for RECORDSWAPBUFFERS */
+/* Bridge out structure for MONITOR3DFENCE */
 typedef struct GED_BRIDGE_OUT_MONITOR3DFENCE_TAG
 {
     GED_ERROR eError;
 } GED_BRIDGE_OUT_MONITOR3DFENCE;
 
+/*****************************************************************************
+ *  QUERY INFO
+ *****************************************************************************/
+
+/* Bridge in structure for QUERY INFO*/
+typedef struct GED_BRIDGE_IN_QUERY_INFO_TAG
+{
+    GED_INFO eType;
+} GED_BRIDGE_IN_QUERY_INFO;
+
+
+/* Bridge out structure for QUERY INFO*/
+typedef struct GED_BRIDGE_OUT_QUERY_INFO_TAG
+{
+    unsigned long   retrieve;
+} GED_BRIDGE_OUT_QUERY_INFO;
+/*****************************************************************************
+ *  NOTIFY VSYNC
+ *****************************************************************************/
+
+/* Bridge in structure for VSYNCEVENT */
+typedef struct GED_BRIDGE_IN_NOTIFY_VSYNC_TAG
+{
+    GED_VSYNC_TYPE eType;
+} GED_BRIDGE_IN_NOTIFY_VSYNC;
+
+/* Bridge out structure for VSYNCEVENT */
+typedef struct GED_BRIDGE_OUT_NOTIFY_VSYNC_TAG
+{
+    GED_DVFS_UM_QUERY_PACK sQueryData;
+    GED_ERROR eError;
+} GED_BRIDGE_OUT_NOTIFY_VSYNC;
+
+/*****************************************************************************
+ *  DVFS PROBE
+ *****************************************************************************/
+
+/* Bridge in structure for DVFS_PROBE */
+typedef struct GED_BRIDGE_IN_DVFS_PROBE_TAG
+{
+    int          pid;
+} GED_BRIDGE_IN_DVFS_PROBE;
+
+/* Bridge out structure for DVFS_PROBE */
+typedef struct GED_BRIDGE_OUT_DVFS_PROBE_TAG
+{
+    GED_ERROR eError;
+} GED_BRIDGE_OUT_DVFS_PROBE;
+
+/*****************************************************************************
+ *  DVFS UM RETURN
+ *****************************************************************************/
+
+/* Bridge in structure for DVFS_UM_RETURN */
+typedef struct GED_BRIDGE_IN_DVFS_UM_RETURN_TAG
+{
+   unsigned long gpu_tar_freq; 
+   bool bFallback;
+} GED_BRIDGE_IN_DVFS_UM_RETURN;
+
+/* Bridge out structure for DVFS_UM_RETURN */
+typedef struct GED_BRIDGE_OUT_DVFS_UM_RETURN_TAG
+{
+    GED_ERROR eError;
+} GED_BRIDGE_OUT_DVFS_UM_RETURN;
+
 /*****************************************************************************
  *  BRIDGE FUNCTIONS
  *****************************************************************************/
@@ -158,4 +234,20 @@ int ged_bridge_monitor_3D_fence(
     GED_BRIDGE_IN_MONITOR3DFENCE *psMonitor3DFenceINT,
     GED_BRIDGE_OUT_MONITOR3DFENCE *psMonitor3DFenceOUT);
 
+int ged_bridge_query_info(
+    GED_BRIDGE_IN_QUERY_INFO *psQueryInfoINT,
+    GED_BRIDGE_OUT_QUERY_INFO *psQueryInfoOUT);
+
+int ged_bridge_notify_vsync(
+    GED_BRIDGE_IN_NOTIFY_VSYNC *psNotifyVsyncINT,
+    GED_BRIDGE_OUT_NOTIFY_VSYNC *psNotifyVsyncOUT);
+
+int ged_bridge_dvfs_probe(
+    GED_BRIDGE_IN_DVFS_PROBE *psDVFSProbeINT, 
+    GED_BRIDGE_OUT_DVFS_PROBE *psDVFSProbeOUT);
+
+int ged_bridge_dvfs_um_retrun(
+    GED_BRIDGE_IN_DVFS_UM_RETURN *psDVFS_UM_returnINT, 
+    GED_BRIDGE_OUT_DVFS_UM_RETURN *psDVFS_UM_returnOUT);
+
 #endif
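Note: each new bridge command pairs an IN/OUT struct with a handler, and ged_main.c (not shown in this section) dispatches on _IOC_NR. A kernel-side sketch driving the new QUERY_INFO handler directly, using only the structs declared above; example_query_cur_freq() is hypothetical:

#include "ged_bridge.h"

static void example_query_cur_freq(void)
{
	GED_BRIDGE_IN_QUERY_INFO  sIn  = { .eType = GED_CUR_FREQ };
	GED_BRIDGE_OUT_QUERY_INFO sOut = { 0 };

	ged_bridge_query_info(&sIn, &sOut);
	/* sOut.retrieve now holds whatever ged_query_info(GED_CUR_FREQ) returned */
}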
diff --git a/drivers/misc/mediatek/gpu/ged/include/ged_dvfs.h b/drivers/misc/mediatek/gpu/ged/include/ged_dvfs.h
new file mode 100644 (file)
index 0000000..3905baf
--- /dev/null
@@ -0,0 +1,115 @@
+#include <linux/types.h>
+#include "ged_type.h"
+
+
+
+//#define ENABLE_COMMON_DVFS 1 
+//#define GED_DVFS_DEBUG 1
+
+#define GED_DVFS_UM_CAL 1
+
+#define GED_DVFS_PROBE_TO_UM 1
+#define GED_DVFS_PROBE_IN_KM 0
+
+#define GED_NO_UM_SERVICE -1
+
+#define GED_DVFS_VSYNC_OFFSET_SIGNAL_EVENT 44
+#define GED_FPS_CHANGE_SIGNAL_EVENT        45
+#define GED_SRV_SUICIDE_EVENT 46
+#define GED_LOW_POWER_MODE_SIGNAL_EVENT    47
+#define GED_MHL4K_VID_SIGNAL_EVENT         48
+#define GED_GAS_SIGNAL_EVENT               49
+
+#define GED_DVFS_FALLBACK 0x5566dead
+
+typedef enum GED_DVFS_COMMIT_TAG
+{
+    GED_DVFS_DEFAULT_COMMIT,
+    GED_DVFS_CUSTOM_CEIL_COMMIT,
+    GED_DVFS_CUSTOM_BOOST_COMMIT,
+    GED_DVFS_SET_BOTTOM_COMMIT,
+    GED_DVFS_SET_LIMIT_COMMIT,
+    GED_DVFS_INPUT_BOOST_COMMIT
+} GED_DVFS_COMMIT_TYPE;
+
+typedef enum GED_DVFS_TUNING_MODE_TAG
+{
+    GED_DVFS_DEFAULT,
+    GED_DVFS_LP,
+    GED_DVFS_JUST_MAKE,
+    GED_DVFS_PERFORMANCE
+} GED_DVFS_TUNING_MODE;
+
+typedef enum GED_DVFS_VSYNC_OFFSET_SWITCH_CMD_TAG
+{
+    GED_DVFS_VSYNC_OFFSET_DEBUG_CLEAR_EVENT,
+    GED_DVFS_VSYNC_OFFSET_FORCE_ON,
+    GED_DVFS_VSYNC_OFFSET_FORCE_OFF,
+    GED_DVFS_VSYNC_OFFSET_TOUCH_EVENT,
+    GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT,
+    GED_DVFS_VSYNC_OFFSET_WFD_EVENT,
+    GED_DVFS_VSYNC_OFFSET_MHL_EVENT,
+    GED_DVFS_VSYNC_OFFSET_GAS_EVENT,
+    GED_DVFS_VSYNC_OFFSET_LOW_POWER_MODE_EVENT,
+    GED_DVFS_VSYNC_OFFSET_MHL4K_VID_EVENT,
+} GED_DVFS_VSYNC_OFFSET_SWITCH_CMD;
+
+#define GED_EVENT_TOUCH (1 << 0)
+#define GED_EVENT_THERMAL (1 << 1)
+#define GED_EVENT_WFD (1 << 2)
+#define GED_EVENT_MHL  (1 << 3)
+#define GED_EVENT_GAS  (1 << 4)
+#define GED_EVENT_LOW_POWER_MODE (1 << 5)
+#define GED_EVENT_MHL4K_VID      (1 << 6)
+
+#define GED_EVENT_FORCE_ON  (1 << 0)
+#define GED_EVENT_FORCE_OFF  (1 << 1)
+#define GED_EVENT_NOT_SYNC  (1 << 2)
+
+
+#define GED_VSYNC_OFFSET_NOT_SYNC -2
+#define GED_VSYNC_OFFSET_SYNC -3
+
+typedef struct GED_DVFS_FREQ_DATA_TAG
+{
+    unsigned int ui32Idx;
+    unsigned long ulFreq;    
+}GED_DVFS_FREQ_DATA;
+
+
+bool ged_dvfs_cal_gpu_utilization(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle);
+void ged_dvfs_cal_gpu_utilization_force(void);
+
+void ged_dvfs_run(unsigned long t, long phase, unsigned long ul3DFenceDoneTime);
+
+void ged_dvfs_set_tuning_mode(GED_DVFS_TUNING_MODE eMode);
+GED_DVFS_TUNING_MODE ged_dvfs_get_tuning_mode(void);
+
+void ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_SWITCH_CMD eEvent, bool bSwitch);
+void ged_dvfs_vsync_offset_level_set(int i32level);
+int ged_dvfs_vsync_offset_level_get(void);
+
+unsigned int ged_dvfs_get_gpu_loading(void);
+unsigned int ged_dvfs_get_gpu_blocking(void);
+unsigned int ged_dvfs_get_gpu_idle(void);
+
+unsigned long ged_query_info( GED_INFO eType);
+
+void ged_dvfs_get_gpu_cur_freq(GED_DVFS_FREQ_DATA* psData);
+void ged_dvfs_get_gpu_pre_freq(GED_DVFS_FREQ_DATA* psData);
+
+void ged_dvfs_sw_vsync_query_data(GED_DVFS_UM_QUERY_PACK* psQueryData);
+
+void ged_dvfs_boost_gpu_freq(void);
+
+GED_ERROR ged_dvfs_probe(int pid);
+GED_ERROR ged_dvfs_um_commit( unsigned long gpu_tar_freq, bool bFallback);
+
+void ged_dvfs_probe_signal(int signo);
+
+void ged_dvfs_gpu_clock_switch_notify(bool bSwitch);
+
+GED_ERROR ged_dvfs_system_init(void);
+void ged_dvfs_system_exit(void);
+
+
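Note: the header above exposes both the loading sampler and the frequency query. A sketch of a polling consumer, assuming the vendor GPU driver has registered ged_dvfs_cal_gpu_utilization_fp (as ged_dvfs.c below requires); example_sample_gpu() is hypothetical:

static void example_sample_gpu(void)
{
	unsigned int ui32Loading, ui32Block, ui32Idle;
	GED_DVFS_FREQ_DATA sFreq;

	/* returns false until the vendor driver registers the fp callback */
	if (ged_dvfs_cal_gpu_utilization(&ui32Loading, &ui32Block, &ui32Idle)) {
		ged_dvfs_get_gpu_cur_freq(&sFreq);
		/* loading/block/idle describe the sampled window;
		 * sFreq carries the table index (ui32Idx) and frequency (ulFreq) */
	}
}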
index b1c065037143f121904646b2aa945f3634fe610f..57fc6b6a77762ba242b7dc8283039b4823bb4ead 100644 (file)
@@ -4,13 +4,14 @@
 typedef enum GED_ERROR_TAG
 {
        GED_OK,
-    GED_ERROR_FAIL,
-    GED_ERROR_OOM,
-    GED_ERROR_OUT_OF_FD,
-    GED_ERROR_FAIL_WITH_LIMIT,
-    GED_ERROR_TIMEOUT,
-    GED_ERROR_CMD_NOT_PROCESSED,
-    GED_ERROR_INVALID_PARAMS
+       GED_ERROR_FAIL,
+       GED_ERROR_OOM,
+       GED_ERROR_OUT_OF_FD,
+       GED_ERROR_FAIL_WITH_LIMIT,
+       GED_ERROR_TIMEOUT,
+       GED_ERROR_CMD_NOT_PROCESSED,
+       GED_ERROR_INVALID_PARAMS,
+       GED_INTENTIONAL_BLOCK
 } GED_ERROR;
 
 
diff --git a/drivers/misc/mediatek/gpu/ged/include/ged_hal.h b/drivers/misc/mediatek/gpu/ged/include/ged_hal.h
new file mode 100644 (file)
index 0000000..68605fa
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __GED_HAL_H__
+#define __GED_HAL_H__
+
+#include "ged_type.h"
+
+GED_ERROR ged_hal_init(void);
+
+void ged_hal_exit(void);
+
+#endif
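Note: ged_hal_init() follows the same init/exit pairing as the other GED subsystems; ged_main.c is expected to call them in order and unwind on failure. A minimal sketch of that hypothetical init sequence:

static GED_ERROR example_ged_startup(void)
{
	GED_ERROR err = ged_hal_init();

	if (err != GED_OK)
		return err;		/* nothing to unwind yet */
	err = ged_mm_init();		/* declared in ged_mm.h below */
	if (err != GED_OK)
		ged_hal_exit();		/* unwind in reverse order */
	return err;
}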
index 60df01eb822b54ea3aa86bb415584b589797c7e9..66df2540f6968a236f8e17e6a5c0ec6dd82db633 100644 (file)
 
 typedef enum GED_LOG_BUF_TYPE_TAG
 {
-       GED_LOG_BUF_TYPE_RINGBUFFER,
-       GED_LOG_BUF_TYPE_QUEUEBUFFER,
+    GED_LOG_BUF_TYPE_RINGBUFFER,
+    GED_LOG_BUF_TYPE_QUEUEBUFFER,
     GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE,
 } GED_LOG_BUF_TYPE;
 
-GED_LOG_BUF_HANDLE ged_log_buf_alloc(int i32LineCount, int i32LineBufferSize, GED_LOG_BUF_TYPE eType, const char* pszName, const char* pszNodeName);
+GED_LOG_BUF_HANDLE ged_log_buf_alloc(int i32MaxLineCount, int i32MaxBufferSizeByte, GED_LOG_BUF_TYPE eType, const char* pszName, const char* pszNodeName);
 
-GED_ERROR ged_log_buf_resize(GED_LOG_BUF_HANDLE hLogBuf, int i32NewLineCount);
+GED_ERROR ged_log_buf_resize(GED_LOG_BUF_HANDLE hLogBuf, int i32NewMaxLineCount, int i32NewMaxBufferSizeByte);
 
 GED_ERROR ged_log_buf_ignore_lines(GED_LOG_BUF_HANDLE hLogBuf, int i32LineCount);
 
-GED_LOG_BUF_HANDLE ged_log_buf_get(const char* pszName);
+GED_ERROR ged_log_buf_reset(GED_LOG_BUF_HANDLE hLogBuf);
 
 void ged_log_buf_free(GED_LOG_BUF_HANDLE hLogBuf);
 
+/* query by Name, return NULL if not found */
+GED_LOG_BUF_HANDLE ged_log_buf_get(const char* pszName);
+
+/* register a pointer; it will be set once the corresponding buffer is allocated. */
+int ged_log_buf_get_early(const char* pszName, GED_LOG_BUF_HANDLE *callback_set_handle);
+
 GED_ERROR ged_log_buf_print(GED_LOG_BUF_HANDLE hLogBuf, const char *fmt, ...) GED_LOG_BUF_FORMAT_PRINTF(2,3);
 
-GED_ERROR ged_log_buf_reset(GED_LOG_BUF_HANDLE hLogBuf);
+enum
+{
+    /* bits 0~7 reserved for internal use */
+    GED_RESVERED                = 0xFF,
+
+    /* log with a prefix kernel time */
+    GED_LOG_ATTR_TIME           = 0x100,
+
+    /* log with a prefix user time, pid, tid */
+    GED_LOG_ATTR_TIME_TPT       = 0x200,
+};
+
+GED_ERROR ged_log_buf_print2(GED_LOG_BUF_HANDLE hLogBuf, int i32LogAttrs, const char *fmt, ...) GED_LOG_BUF_FORMAT_PRINTF(3,4);
 
 GED_ERROR ged_log_system_init(void);
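Note: ged_log_buf_print2() extends ged_log_buf_print() with per-line attribute flags; the bridge's LOG_BUF_WRITE path passes the user-supplied attrs straight through to it. A sketch, where "HWVSYNC" is a hypothetical buffer name allocated elsewhere:

static void example_log(void)
{
	/* query by name; returns NULL if no such buffer exists */
	GED_LOG_BUF_HANDLE hBuf = ged_log_buf_get("HWVSYNC");

	if (hBuf)	/* prefix each line with kernel time */
		ged_log_buf_print2(hBuf, GED_LOG_ATTR_TIME, "frame done, fd=%d", 3);
}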
 
diff --git a/drivers/misc/mediatek/gpu/ged/include/ged_mm.h b/drivers/misc/mediatek/gpu/ged/include/ged_mm.h
new file mode 100644 (file)
index 0000000..5dac101
--- /dev/null
@@ -0,0 +1,2 @@
+GED_ERROR ged_mm_init(void);
+void ged_mm_exit(void);
\ No newline at end of file
index ce22eefe9cff930106676a9b46b62ec3d6e8423f..80b75dcb69f1833e620b211c382afba56e882732 100644 (file)
@@ -4,5 +4,7 @@
 #include "ged_type.h"
 
 GED_ERROR ged_monitor_3D_fence_add(int fence_fd);
+void ged_monitor_3D_fence_notify(void);
+unsigned long ged_monitor_3D_fence_done_time(void);
 
 #endif
diff --git a/drivers/misc/mediatek/gpu/ged/include/ged_notify_sw_vsync.h b/drivers/misc/mediatek/gpu/ged/include/ged_notify_sw_vsync.h
new file mode 100644 (file)
index 0000000..3fb3cf4
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __GED_NOTIFY_SW_VSYNC_H__
+#define __GED_NOTIFY_SW_VSYNC_H__
+
+#include "ged_type.h"
+
+
+GED_ERROR ged_notify_sw_vsync(GED_VSYNC_TYPE eType, GED_DVFS_UM_QUERY_PACK* psQueryData);
+
+GED_ERROR ged_notify_sw_vsync_system_init(void);
+
+void ged_notify_sw_vsync_system_exit(void);
+
+
+void ged_sodi_start(void);
+void ged_sodi_stop(void);
+
+#endif
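Note: ged_notify_sw_vsync() fills a GED_DVFS_UM_QUERY_PACK (defined in ged_type.h below) so the user-mode DVFS service can evaluate its policy and commit a frequency back via GED_BRIDGE_IO_DVFS_UM_RETURN. A kernel-side sketch of the call; example_on_sw_vsync() is hypothetical:

static void example_on_sw_vsync(void)
{
	GED_DVFS_UM_QUERY_PACK sQuery;

	if (ged_notify_sw_vsync(GED_VSYNC_SW_EVENT, &sQuery) == GED_OK) {
		/* sQuery.ui32GPULoading, sQuery.gpu_cur_freq, sQuery.usT etc.
		 * describe the window the UM policy should evaluate */
	}
}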
index 5437aea0bf076d73322606bcfd5f0c5ec653e448..7f16572c9e6bcda77d22eacddbf2380ae14e94d3 100644 (file)
@@ -29,4 +29,8 @@ void ged_profile_dvfs_record_clock_on(void);
 
 void ged_profile_dvfs_record_clock_off(void);
 
+void ged_profile_dvfs_record_SW_vsync(unsigned long ulTimeStamp, long lPhase, unsigned long ul3DFenceDoneTime);
+
+void ged_profile_dvfs_record_policy(long lFreq, unsigned int ui32GpuLoading, long lPreT1, unsigned long ulPreFreq, long t0, unsigned long ulCurFreq, long t1, long lPhase);
+
 #endif
diff --git a/drivers/misc/mediatek/gpu/ged/include/ged_thread.h b/drivers/misc/mediatek/gpu/ged/include/ged_thread.h
new file mode 100644 (file)
index 0000000..a834d08
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __GED_THREAD_H__
+#define __GED_THREAD_H__
+
+#include "ged_type.h"
+
+typedef void* GED_THREAD_HANDLE;
+
+typedef void (*GED_THREAD_FUNC)(void*);
+
+GED_ERROR ged_thread_create(GED_THREAD_HANDLE *phThread, const char* szThreadName, GED_THREAD_FUNC pFunc, void* pvData);
+
+void ged_thread_destroy(GED_THREAD_HANDLE hThread);
+
+#endif
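Note: the thread wrapper hides the kernel threading details behind an opaque handle. A minimal usage sketch with a hypothetical worker:

static void example_worker(void *pvData)	/* a GED_THREAD_FUNC */
{
	/* long-running work; ged_thread_destroy() is expected to stop it */
}

static void example_spawn(void)
{
	GED_THREAD_HANDLE hThread;

	if (ged_thread_create(&hThread, "ged_example", example_worker, NULL) == GED_OK)
		ged_thread_destroy(hThread);
}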
index 11dd19300c09de7835972eedbc4dbd682be37cf1..b8120ceb0971aa12811981b24e3f65892cc3f9d7 100644 (file)
@@ -13,4 +13,52 @@ typedef      enum GED_BOOL_TAG
        GED_TRUE
 } GED_BOOL;
 
+typedef enum GED_INFO_TAG
+{
+    GED_LOADING,
+    GED_IDLE,
+    GED_BLOCKING,
+    GED_PRE_FREQ,
+    GED_PRE_FREQ_IDX,
+    GED_CUR_FREQ,
+    GED_CUR_FREQ_IDX,
+    GED_MAX_FREQ_IDX,
+    GED_MAX_FREQ_IDX_FREQ,
+    GED_MIN_FREQ_IDX,
+    GED_MIN_FREQ_IDX_FREQ,    
+    GED_3D_FENCE_DONE_TIME,
+    GED_VSYNC_OFFSET,
+    GED_EVENT_STATUS,
+    GED_EVENT_DEBUG_STATUS,
+    GED_EVENT_GAS_MODE,
+    GED_SRV_SUICIDE,
+    GED_PRE_HALF_PERIOD,
+    GED_LATEST_START,
+    GED_UNDEFINED
+} GED_INFO;
+
+typedef enum GED_VSYNC_TYPE_TAG
+{
+    GED_VSYNC_SW_EVENT,
+    GED_VSYNC_HW_EVENT
+} GED_VSYNC_TYPE;
+
+typedef struct GED_DVFS_UM_QUERY_PACK_TAG
+{
+    unsigned int ui32GPULoading;
+    unsigned int ui32GPUFreqID;
+    unsigned long gpu_cur_freq;
+    unsigned long gpu_pre_freq;
+    long long usT;
+    long long nsOffset;
+    unsigned long ul3DFenceDoneTime;    
+    unsigned long ulPreCalResetTS_us;
+    unsigned long ulWorkingPeriod_us;
+}GED_DVFS_UM_QUERY_PACK;
+
+enum {
+       GAS_CATEGORY_GAME,
+       GAS_CATEGORY_OTHERS,
+};
+
 #endif
index 91ed382caf88fad36e7e3976d5d00d29ebbb3a0b..1884068391eea085e5a5da9c851a16e39824e047 100644 (file)
 unsigned long ged_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
 {
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-    if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
-    {
-       return __copy_to_user(pvTo, pvFrom, ulBytes);
-    }
-    return ulBytes;
+       if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+       {
+               return __copy_to_user(pvTo, pvFrom, ulBytes);
+       }
+       return ulBytes;
 #else
-    return copy_to_user(pvTo, pvFrom, ulBytes);
+       return copy_to_user(pvTo, pvFrom, ulBytes);
 #endif
 }
 
 unsigned long ged_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
 {
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-    if (access_ok(VERIFY_READ, pvFrom, ulBytes))
-    {
-           return __copy_from_user(pvTo, pvFrom, ulBytes);
-    }
-    return ulBytes;
+       if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+       {
+               return __copy_from_user(pvTo, pvFrom, ulBytes);
+       }
+       return ulBytes;
 #else
-    return copy_from_user(pvTo, pvFrom, ulBytes);
+       return copy_from_user(pvTo, pvFrom, ulBytes);
 #endif
 }
 
 void* ged_alloc(int i32Size)
 {
-    void *pvBuf;
-
-    if (i32Size <= PAGE_SIZE)
-    {
-        pvBuf = kmalloc(i32Size, GFP_KERNEL);
-    }
-    else
-    {
-        pvBuf = vmalloc(i32Size);
-    }
-
-    return pvBuf;
+       void *pvBuf;
+
+       if (i32Size <= PAGE_SIZE)
+       {
+               pvBuf = kmalloc(i32Size, GFP_KERNEL);
+       }
+       else
+       {
+               pvBuf = vmalloc(i32Size);
+       }
+
+       return pvBuf;
+}
+
+void* ged_alloc_atomic(int i32Size)
+{
+       void *pvBuf;
+
+       if (i32Size <= PAGE_SIZE)
+       {
+               pvBuf = kmalloc(i32Size, GFP_ATOMIC);
+       }
+       else
+       {
+               pvBuf = vmalloc(i32Size);
+       }
+
+       return pvBuf;
 }
 
 void ged_free(void* pvBuf, int i32Size)
 {
-    if (pvBuf)
-    {
-        if (i32Size <= PAGE_SIZE)
-        {
-            kfree(pvBuf);
-        }
-        else
-        {
-            vfree(pvBuf);
-        }
-    }
+       if (pvBuf)
+       {
+               if (i32Size <= PAGE_SIZE)
+               {
+                       kfree(pvBuf);
+               }
+               else
+               {
+                       vfree(pvBuf);
+               }
+       }
 }
 
 long ged_get_pid(void)
 {
-    if (in_interrupt())
-    {
-        return 0xffffffffL;
-    }
+       if (in_interrupt())
+       {
+               return 0xffffffffL;
+       }
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
-    return (long)current->pgrp;
+       return (long)current->pgrp;
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
-    return (long)task_tgid_nr(current);
+       return (long)task_tgid_nr(current);
 #else
-    return (long)current->tgid;
+       return (long)current->tgid;
 #endif
 #endif
 }
 
+unsigned long long ged_get_time(void)
+{
+       unsigned long long temp;
+
+       preempt_disable();
+       temp = cpu_clock(smp_processor_id());
+       preempt_enable();
+
+       return temp;
+}
+
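Note: ged_get_time() wraps cpu_clock(), so it returns a nanosecond-resolution timestamp; preemption is disabled so the processor id stays stable for the read. A sketch of timing a section with it:

unsigned long long ullStart = ged_get_time();
/* ... section being timed ... */
unsigned long long ullElapsedNs = ged_get_time() - ullStart;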
index 59ff31fc2ee82088637863611f352cd9d546ae14..435ef78e7f0c1aed09cdbb524a8556730a974c55 100644 (file)
 #include <linux/kernel.h>
-#include <linux/mtk_gpu_utility.h>
+#include <mt-plat/mtk_gpu_utility.h>
 
 #include "ged_base.h"
 #include "ged_bridge.h"
 #include "ged_log.h"
 #include "ged_profile_dvfs.h"
 #include "ged_monitor_3D_fence.h"
+#include "ged_notify_sw_vsync.h"
+#include "ged_dvfs.h"
 
 //-----------------------------------------------------------------------------
 int ged_bridge_log_buf_get(
-    GED_BRIDGE_IN_LOGBUFGET *psLogBufGetIN,
-    GED_BRIDGE_OUT_LOGBUFGET *psLogBufGetOUT)
+               GED_BRIDGE_IN_LOGBUFGET *psLogBufGetIN,
+               GED_BRIDGE_OUT_LOGBUFGET *psLogBufGetOUT)
 {
-    psLogBufGetOUT->hLogBuf = ged_log_buf_get(psLogBufGetIN->acName);
-    psLogBufGetOUT->eError = psLogBufGetOUT->hLogBuf ? GED_OK : GED_ERROR_FAIL;
-    return 0;
+       psLogBufGetOUT->hLogBuf = ged_log_buf_get(psLogBufGetIN->acName);
+       psLogBufGetOUT->eError = psLogBufGetOUT->hLogBuf ? GED_OK : GED_ERROR_FAIL;
+       return 0;
 }
 //-----------------------------------------------------------------------------
 int ged_bridge_log_buf_write(
-    GED_BRIDGE_IN_LOGBUFWRITE *psLogBufWriteIN,
-    GED_BRIDGE_OUT_LOGBUFWRITE *psLogBufWriteOUT)
+               GED_BRIDGE_IN_LOGBUFWRITE *psLogBufWriteIN,
+               GED_BRIDGE_OUT_LOGBUFWRITE *psLogBufWriteOUT)
 {
-    psLogBufWriteOUT->eError = 
-        ged_log_buf_print(psLogBufWriteIN->hLogBuf, "%s", psLogBufWriteIN->acLogBuf);
+       psLogBufWriteOUT->eError = 
+               ged_log_buf_print2(psLogBufWriteIN->hLogBuf, psLogBufWriteIN->attrs, psLogBufWriteIN->acLogBuf);
 
 #if 0
-    if (ged_log_buf_write(
-            psLogBufWriteIN->hLogBuf, 
-            /*from user*/psLogBufWriteIN->acLogBuf,
-            GED_BRIDGE_IN_LOGBUF_SIZE) > 0)
-    {
-        psLogBufWriteOUT->eError = GED_OK;
-    }
-    else
-    {
-        psLogBufWriteOUT->eError = GED_ERROR_FAIL;
-    }
+       if (ged_log_buf_write(
+                               psLogBufWriteIN->hLogBuf, 
+                               /*from user*/psLogBufWriteIN->acLogBuf,
+                               GED_BRIDGE_IN_LOGBUF_SIZE) > 0)
+       {
+               psLogBufWriteOUT->eError = GED_OK;
+       }
+       else
+       {
+               psLogBufWriteOUT->eError = GED_ERROR_FAIL;
+       }
 #endif
-    return 0;
+       return 0;
 }
 //-----------------------------------------------------------------------------
 int ged_bridge_log_buf_reset(
-    GED_BRIDGE_IN_LOGBUFRESET *psLogBufResetIn,
-    GED_BRIDGE_OUT_LOGBUFRESET *psLogBufResetOUT)
+               GED_BRIDGE_IN_LOGBUFRESET *psLogBufResetIn,
+               GED_BRIDGE_OUT_LOGBUFRESET *psLogBufResetOUT)
 {
-    psLogBufResetOUT->eError = ged_log_buf_reset(psLogBufResetIn->hLogBuf);
-    return 0;
+       psLogBufResetOUT->eError = ged_log_buf_reset(psLogBufResetIn->hLogBuf);
+       return 0;
 }
 //-----------------------------------------------------------------------------
 int ged_bridge_boost_gpu_freq(
-    GED_BRIDGE_IN_BOOSTGPUFREQ *psBoostGpuFreqIN,
-    GED_BRIDGE_OUT_BOOSTGPUFREQ *psBoostGpuFreqOUT)
+               GED_BRIDGE_IN_BOOSTGPUFREQ *psBoostGpuFreqIN,
+               GED_BRIDGE_OUT_BOOSTGPUFREQ *psBoostGpuFreqOUT)
 {
 #if 1
-    psBoostGpuFreqOUT->eError = mtk_boost_gpu_freq() ? GED_OK : GED_ERROR_FAIL;
+       psBoostGpuFreqOUT->eError = mtk_boost_gpu_freq() ? GED_OK : GED_ERROR_FAIL;
 #else
-    unsigned int ui32Count;
-    if (mtk_custom_get_gpu_freq_level_count(&ui32Count))
-    {
-        int i32Level = (ui32Count - 1) - GED_BOOST_GPU_FREQ_LEVEL_MAX - psBoostGpuFreqIN->eGPUFreqLevel;
-        mtk_boost_gpu_freq(i32Level);
-        psBoostGpuFreqOUT->eError = GED_OK;
-    }
-    else
-    {
-        psBoostGpuFreqOUT->eError = GED_ERROR_FAIL;
-    }
+       unsigned int ui32Count;
+       if (mtk_custom_get_gpu_freq_level_count(&ui32Count))
+       {
+               int i32Level = (ui32Count - 1) - GED_BOOST_GPU_FREQ_LEVEL_MAX - psBoostGpuFreqIN->eGPUFreqLevel;
+               mtk_boost_gpu_freq(i32Level);
+               psBoostGpuFreqOUT->eError = GED_OK;
+       }
+       else
+       {
+               psBoostGpuFreqOUT->eError = GED_ERROR_FAIL;
+       }
 #endif
-    return 0;
+       return 0;
 }
 //-----------------------------------------------------------------------------
 int ged_bridge_monitor_3D_fence(
-    GED_BRIDGE_IN_MONITOR3DFENCE *psMonitor3DFenceINT,
-    GED_BRIDGE_OUT_MONITOR3DFENCE *psMonitor3DFenceOUT)
+               GED_BRIDGE_IN_MONITOR3DFENCE *psMonitor3DFenceINT,
+               GED_BRIDGE_OUT_MONITOR3DFENCE *psMonitor3DFenceOUT)
 {
-    psMonitor3DFenceOUT->eError = ged_monitor_3D_fence_add(psMonitor3DFenceINT->fd);
-    return 0;
+       psMonitor3DFenceOUT->eError = ged_monitor_3D_fence_add(psMonitor3DFenceINT->fd);
+       return 0;
 }
+//-----------------------------------------------------------------------------
+int ged_bridge_query_info(
+               GED_BRIDGE_IN_QUERY_INFO *psQueryInfoINT,
+               GED_BRIDGE_OUT_QUERY_INFO *psQueryInfoOUT)
+{
+       psQueryInfoOUT->retrieve = ged_query_info( psQueryInfoINT->eType);
+       return 0;
+}
+//-----------------------------------------------------------------------------
+int ged_bridge_notify_vsync(
+               GED_BRIDGE_IN_NOTIFY_VSYNC *psNotifyVsyncINT,
+               GED_BRIDGE_OUT_NOTIFY_VSYNC *psNotifyVsyncOUT)
+{
+       psNotifyVsyncOUT->eError = 
+               //ged_notify_vsync(psNotifyVsyncINT->eType, &psNotifyVsyncOUT->t);
+               ged_notify_sw_vsync(psNotifyVsyncINT->eType, &psNotifyVsyncOUT->sQueryData);
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+int ged_bridge_dvfs_probe(
+               GED_BRIDGE_IN_DVFS_PROBE *psDVFSProbeINT, 
+               GED_BRIDGE_OUT_DVFS_PROBE *psDVFSProbeOUT)
+{
+       psDVFSProbeOUT->eError = ged_dvfs_probe(psDVFSProbeINT->pid);
+       return 0;
+}
+
+//-----------------------------------------------------------------------------
+int ged_bridge_dvfs_um_retrun(
+               GED_BRIDGE_IN_DVFS_UM_RETURN *psDVFS_UM_returnINT, 
+               GED_BRIDGE_OUT_DVFS_UM_RETURN *psDVFS_UM_returnOUT)
+{
+       psDVFS_UM_returnOUT->eError = 
+               ged_dvfs_um_commit( psDVFS_UM_returnINT->gpu_tar_freq,
+                               psDVFS_UM_returnINT->bFallback);
+       return 0;
+}
+
+
index 0e208903d097ed33b0915317d8a54a966c9fe8fe..fc23ece6085b622cf87f31b955e4baa29d95baf8 100644 (file)
@@ -17,7 +17,7 @@ typedef struct _GED_DEBUGFS_PRIV_DATA_
        void*                   pvData;
 } GED_DEBUGFS_PRIV_DATA;
 //-----------------------------------------------------------------------------
-static GED_ERROR ged_debugFS_open(struct inode *psINode, struct file *psFile)
+static int ged_debugFS_open(struct inode *psINode, struct file *psFile)
 {
        GED_DEBUGFS_PRIV_DATA *psPrivData = (GED_DEBUGFS_PRIV_DATA *)psINode->i_private;
        int iResult;
@@ -29,17 +29,17 @@ static GED_ERROR ged_debugFS_open(struct inode *psINode, struct file *psFile)
 
                psSeqFile->private = psPrivData->pvData;
 
-        return GED_OK;
+               return GED_OK;
        }
 
        return GED_ERROR_FAIL;
 }
 //-----------------------------------------------------------------------------
 static ssize_t ged_debugFS_write(
-    struct file*        psFile,
-    const char __user*  pszBuffer,
-    size_t              uiCount,
-    loff_t*             puiPosition)
+               struct file*        psFile,
+               const char __user*  pszBuffer,
+               size_t              uiCount,
+               loff_t*             puiPosition)
 {
        struct inode *psINode = psFile->f_path.dentry->d_inode;
        GED_DEBUGFS_PRIV_DATA *psPrivData = (GED_DEBUGFS_PRIV_DATA *)psINode->i_private;
@@ -63,12 +63,12 @@ static const struct file_operations gsGEDDebugFSFileOps =
 };
 //-----------------------------------------------------------------------------
 GED_ERROR ged_debugFS_create_entry(
-    const char*             pszName,
-       void*                   pvDir,
-       struct seq_operations*  psReadOps,
-    GED_ENTRY_WRITE_FUNC*   pfnWrite,
-       void*                   pvData,
-       struct dentry**         ppsEntry)
+               const char*             pszName,
+               void*                   pvDir,
+               struct seq_operations*  psReadOps,
+               GED_ENTRY_WRITE_FUNC*   pfnWrite,
+               void*                   pvData,
+               struct dentry**         ppsEntry)
 {
        GED_DEBUGFS_PRIV_DATA* psPrivData;
        struct dentry* psEntry;
@@ -95,17 +95,17 @@ GED_ERROR ged_debugFS_create_entry(
 
        if (pfnWrite != NULL)
        {
-               uiMode |= S_IWUSR;
+               uiMode |= S_IWUSR | S_IWGRP | S_IWOTH;
        }
 
        psEntry = debugfs_create_file(pszName,
-                                     uiMode,
-                                     (pvDir != NULL) ? (struct dentry *)pvDir : gpsDebugFSEntryDir,
-                                     psPrivData,
-                                     &gsGEDDebugFSFileOps);
+                       uiMode,
+                       (pvDir != NULL) ? (struct dentry *)pvDir : gpsDebugFSEntryDir,
+                       psPrivData,
+                       &gsGEDDebugFSFileOps);
        if (IS_ERR(psEntry))
        {
-        GED_LOGE("Failed to create '%s' debugfs entry\n", pszName);
+               GED_LOGE("Failed to create '%s' debugfs entry\n", pszName);
                return GED_ERROR_FAIL;
        }
 
@@ -125,9 +125,9 @@ void ged_debugFS_remove_entry(struct dentry *psEntry)
 }
 //-----------------------------------------------------------------------------
 GED_ERROR ged_debugFS_create_entry_dir(
-    const char*     pszName,
-    struct dentry*  psParentDir,
-    struct dentry** ppsDir)
+               const char*     pszName,
+               struct dentry*  psParentDir,
+               struct dentry** ppsDir)
 {
        struct dentry *psDir;
 
@@ -139,7 +139,7 @@ GED_ERROR ged_debugFS_create_entry_dir(
        psDir = debugfs_create_dir(pszName, (psParentDir) ? psParentDir : gpsDebugFSEntryDir);
        if (psDir == NULL)
        {
-        GED_LOGE("Failed to create '%s' debugfs directory\n", pszName);
+               GED_LOGE("Failed to create '%s' debugfs directory\n", pszName);
                return GED_ERROR_OOM;
        }
 
@@ -160,7 +160,7 @@ GED_ERROR ged_debugFS_init(void)
        gpsDebugFSEntryDir = debugfs_create_dir(GED_DEBUGFS_DIR_NAME, NULL);
        if (gpsDebugFSEntryDir == NULL)
        {
-        GED_LOGE("Failed to create '%s' debugfs root directory\n", GED_DEBUGFS_DIR_NAME);
+               GED_LOGE("Failed to create '%s' debugfs root directory\n", GED_DEBUGFS_DIR_NAME);
                return GED_ERROR_OOM;
        }
 
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_dvfs.c b/drivers/misc/mediatek/gpu/ged/src/ged_dvfs.c
new file mode 100644 (file)
index 0000000..6e7f13b
--- /dev/null
@@ -0,0 +1,1026 @@
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#ifdef GED_DVFS_ENABLE
+#include <mt-plat/mt_boot.h>
+#include <mt_gpufreq.h>
+#endif
+
+#include <trace/events/mtk_events.h>
+#include <mt-plat/mtk_gpu_utility.h>
+
+#include <asm/siginfo.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+
+
+#include "ged_dvfs.h"
+#include "ged_monitor_3D_fence.h"
+#include "ged_profile_dvfs.h"
+#include "ged_log.h"
+#include "ged_base.h"
+
+#define MTK_DEFER_DVFS_WORK_MS          10000
+#define MTK_DVFS_SWITCH_INTERVAL_MS     50//16//100
+
+/* GED_DVFS_SKIP_ROUNDS makes DVFS skip rounds after a boost is raised;
+   the value is a countdown measured in DVFS periods.
+   With vsync (~16 ms) currently used as the period, 3 rounds holds
+   DVFS off for (32, 48] ms per boost. */
+#define GED_DVFS_SKIP_ROUNDS 3
+
+extern GED_LOG_BUF_HANDLE ghLogBuf_DVFS;
+extern GED_LOG_BUF_HANDLE ghLogBuf_ged_srv;
+
+static struct mutex gsDVFSLock;
+static struct mutex gsVSyncOffsetLock;
+
+static unsigned int g_iSkipCount=0;
+static int g_dvfs_skip_round=0;
+
+static unsigned int gpu_power = 0;
+static unsigned int gpu_dvfs_enable;
+#ifdef GED_DVFS_ENABLE
+static unsigned int boost_gpu_enable;
+static unsigned int  gpu_bottom_freq;
+static unsigned int  gpu_cust_boost_freq;
+static unsigned int  gpu_cust_upbound_freq;
+static unsigned int  g_ui32PreFreqID;
+static unsigned int  g_bottom_freq_id;
+
+static unsigned int  g_cust_upbound_freq_id;
+
+#endif
+
+static unsigned int  g_computed_freq_id = 0;
+
+static unsigned int gpu_debug_enable;
+
+
+static unsigned int  g_cust_boost_freq_id;
+
+unsigned int g_gpu_timer_based_emu = 0;
+
+static unsigned int gpu_pre_loading = 0;
+unsigned int gpu_loading = 0;
+unsigned int gpu_av_loading = 0;
+static unsigned int gpu_block = 0;
+static unsigned int gpu_idle = 0;
+
+
+static unsigned long g_ulCalResetTS_us = 0; // calculate loading reset time stamp
+static unsigned long g_ulPreCalResetTS_us = 0; // previous calculate loading reset time stamp
+static unsigned long g_ulWorkingPeriod_us = 0; // last frame half, t0
+
+unsigned long g_ulPreDVFS_TS_us = 0; // record previous DVFS applying time stamp
+
+
+static unsigned int  g_ui32FreqIDFromPolicy = 0;
+
+unsigned long g_ulvsync_period;
+static GED_DVFS_TUNING_MODE g_eTuningMode = 0;
+
+unsigned int g_ui32EventStatus = 0;
+unsigned int g_ui32EventDebugStatus = 0;
+static int g_VsyncOffsetLevel = 0;
+
+static int g_probe_pid=GED_NO_UM_SERVICE;
+
+
+
+typedef void (*gpufreq_input_boost_notify)(unsigned int );
+typedef void (*gpufreq_power_limit_notify)(unsigned int );
+extern void mt_gpufreq_input_boost_notify_registerCB(gpufreq_input_boost_notify pCB);
+extern void mt_gpufreq_power_limit_notify_registerCB(gpufreq_power_limit_notify pCB);
+extern void (*mtk_boost_gpu_freq_fp)(void);
+extern void (*mtk_set_bottom_gpu_freq_fp)(unsigned int);
+extern unsigned int (*mtk_get_bottom_gpu_freq_fp)(void);
+extern unsigned int (*mtk_custom_get_gpu_freq_level_count_fp)(void);
+extern void (*mtk_custom_boost_gpu_freq_fp)(unsigned int ui32FreqLevel);
+extern void (*mtk_custom_upbound_gpu_freq_fp)(unsigned int ui32FreqLevel);
+extern unsigned int (*mtk_get_custom_boost_gpu_freq_fp)(void);
+extern unsigned int (*mtk_get_custom_upbound_gpu_freq_fp)(void);
+extern unsigned int (*mtk_get_gpu_loading_fp)(void);
+extern unsigned int (*mtk_get_gpu_block_fp)(void);
+extern unsigned int (*mtk_get_gpu_idle_fp)(void);
+extern void (*mtk_do_gpu_dvfs_fp)(unsigned long t, long phase, unsigned long ul3DFenceDoneTime);
+extern void (*mtk_gpu_dvfs_set_mode_fp)(int eMode);
+extern void (ged_monitor_3D_fence_set_disable)(GED_BOOL bFlag);
+
+static bool ged_dvfs_policy(
+               unsigned int ui32GPULoading, unsigned int* pui32NewFreqID, 
+               unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed);
+unsigned long ged_gas_query_mode(void);
+
+unsigned long ged_query_info( GED_INFO eType)
+{
+       switch(eType)
+       {
+#ifdef GED_DVFS_ENABLE    
+               case GED_LOADING:
+                       return gpu_loading;
+               case GED_IDLE:
+                       return gpu_idle;
+               case GED_BLOCKING:
+                       return gpu_block;
+               case GED_PRE_FREQ:
+                       return mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
+               case GED_PRE_FREQ_IDX:
+                       return g_ui32PreFreqID;
+               case GED_CUR_FREQ:
+                       return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_cur_freq_index());
+               case GED_CUR_FREQ_IDX:
+                       return mt_gpufreq_get_cur_freq_index();
+               case GED_MAX_FREQ_IDX:
+                       return mt_gpufreq_get_dvfs_table_num()-1;
+               case GED_MAX_FREQ_IDX_FREQ:   
+                       return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_dvfs_table_num()-1);
+               case GED_MIN_FREQ_IDX:
+                       return 0;
+               case GED_MIN_FREQ_IDX_FREQ:
+                       return mt_gpufreq_get_freq_by_idx(0);
+               case GED_3D_FENCE_DONE_TIME:
+                       return ged_monitor_3D_fence_done_time();
+               case GED_VSYNC_OFFSET:
+                       return ged_dvfs_vsync_offset_level_get();
+               case GED_EVENT_STATUS:
+                       return g_ui32EventStatus;
+               case GED_EVENT_DEBUG_STATUS:
+                       return g_ui32EventDebugStatus;
+               case GED_EVENT_GAS_MODE:
+                       return ged_gas_query_mode();
+               case GED_SRV_SUICIDE:
+                       ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
+                       return g_probe_pid;
+               case GED_PRE_HALF_PERIOD:
+                       return g_ulWorkingPeriod_us;
+               case GED_LATEST_START:
+                       return g_ulPreCalResetTS_us;
+#endif             
+               default:
+                       return 0;
+       }
+}
+EXPORT_SYMBOL(ged_query_info);
+
+//-----------------------------------------------------------------------------
+void (*ged_dvfs_cal_gpu_utilization_fp)(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle) = NULL;
+EXPORT_SYMBOL(ged_dvfs_cal_gpu_utilization_fp);
+//-----------------------------------------------------------------------------
+
+bool ged_dvfs_cal_gpu_utilization(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle)
+{
+
+       if (NULL != ged_dvfs_cal_gpu_utilization_fp)
+       {
+               ged_dvfs_cal_gpu_utilization_fp(pui32Loading, pui32Block, pui32Idle);        
+               return true;
+       }
+       return false;
+}
+
+//-----------------------------------------------------------------------------
+// void (*ged_dvfs_gpu_freq_commit_fp)(unsigned long ui32NewFreqID)
+// call back function
+// This shall be registered in vendor's GPU driver,
+// since each IP has its own rule
+void (*ged_dvfs_gpu_freq_commit_fp)(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType, int* pbCommited) = NULL;
+EXPORT_SYMBOL(ged_dvfs_gpu_freq_commit_fp);
+//-----------------------------------------------------------------------------
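+/*
+ * Registration sketch (illustrative, not part of this patch): a vendor
+ * GPU driver is expected to mount its commit hook during init, e.g.
+ *
+ *     static void vendor_freq_commit(unsigned long ui32NewFreqID,
+ *             GED_DVFS_COMMIT_TYPE eCommitType, int *pbCommited)
+ *     {
+ *             // switch to the OPP at index ui32NewFreqID ...
+ *             *pbCommited = 1;
+ *     }
+ *     ged_dvfs_gpu_freq_commit_fp = vendor_freq_commit;
+ *
+ * vendor_freq_commit is a hypothetical name; only the fp signature is
+ * defined here.
+ */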
+
+bool ged_dvfs_gpu_freq_commit(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType)
+{
+       int bCommited=false;
+#ifdef GED_DVFS_ENABLE    
+       unsigned long ui32CurFreqID;
+       ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
+       if (NULL != ged_dvfs_gpu_freq_commit_fp)
+       {
+
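+               /*
+                * Index 0 is the highest frequency; larger indices mean
+                * lower frequencies. So the bottom/boost floors clamp by
+                * lowering the index, while the custom ceiling and the
+                * thermal limit clamp by raising it.
+                * Illustrative only (table size is made up): with a 5-entry
+                * table, g_bottom_freq_id = 3 clamps a request for idx 4 to
+                * idx 3 (a frequency floor), and g_cust_upbound_freq_id = 1
+                * clamps a request for idx 0 to idx 1 (a ceiling).
+                */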
+               if (ui32NewFreqID > g_bottom_freq_id)
+               {
+                       ui32NewFreqID = g_bottom_freq_id;
+               }
+               if (ui32NewFreqID > g_cust_boost_freq_id)
+               {
+                       ui32NewFreqID = g_cust_boost_freq_id;
+               }
+
+               // up bound
+               if (ui32NewFreqID < g_cust_upbound_freq_id)
+               {
+                       ui32NewFreqID = g_cust_upbound_freq_id;
+               }
+
+               // thermal power limit
+               if (ui32NewFreqID < mt_gpufreq_get_thermal_limit_index())
+               {
+                       ui32NewFreqID = mt_gpufreq_get_thermal_limit_index();
+               }
+
+               // do change
+               if (ui32NewFreqID != ui32CurFreqID)
+               {
+                       // call to DVFS module
+                       ged_dvfs_gpu_freq_commit_fp(ui32NewFreqID, eCommitType, &bCommited);
+                       /*
+                        * To-Do: refine the previous-frequency contribution,
+                        * since multiple frequency settings can occur within
+                        * one execution period. Is this fatal for precision?
+                        */
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] new freq ID committed: idx=%lu type=%u", ui32NewFreqID, eCommitType);
+                       if(true==bCommited)
+                       {
+                               ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] committed true");
+                               g_ui32PreFreqID = ui32CurFreqID;            
+                       }
+               }       
+       }
+#endif    
+       return bCommited;
+}
+
+
+/*
+ * Note: despite the "ns" in the name, this returns 1000000/fps, i.e. a
+ * microsecond-scale period (60 fps -> 16666). Consumers of
+ * g_ulvsync_period appear to rely on this scale, so the behavior is
+ * kept as-is.
+ */
+unsigned long get_ns_period_from_fps(unsigned int ui32Fps)
+{
+       return 1000000/ui32Fps;
+}
+
+
+void ged_dvfs_set_tuning_mode(GED_DVFS_TUNING_MODE eMode)
+{
+       g_eTuningMode = eMode;
+}
+
+void ged_dvfs_set_tuning_mode_wrap(int eMode)
+{
+       ged_dvfs_set_tuning_mode((GED_DVFS_TUNING_MODE)eMode);
+}
+
+GED_DVFS_TUNING_MODE ged_dvfs_get_tuning_mode(void)
+{
+       return g_eTuningMode;
+}
+
+
+
+
+void ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_SWITCH_CMD eEvent, bool bSwitch)
+{
+       unsigned int  ui32BeforeSwitchInterpret;
+       unsigned int  ui32BeforeDebugInterpret;
+       mutex_lock(&gsVSyncOffsetLock);
+
+       ui32BeforeSwitchInterpret = g_ui32EventStatus;
+       ui32BeforeDebugInterpret = g_ui32EventDebugStatus;
+
+       switch(eEvent)
+       {
+               case GED_DVFS_VSYNC_OFFSET_FORCE_ON:
+                       g_ui32EventDebugStatus |= GED_EVENT_FORCE_ON;
+                       g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_OFF);
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_FORCE_OFF:
+                       g_ui32EventDebugStatus |= GED_EVENT_FORCE_OFF;
+                       g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_ON);
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_DEBUG_CLEAR_EVENT:
+                       g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_ON);
+                       g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_OFF);
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_TOUCH_EVENT:
+                       if(GED_TRUE==bSwitch) // touch boost
+                               ged_dvfs_boost_gpu_freq(); 
+                       (bSwitch)? (g_ui32EventStatus|=GED_EVENT_TOUCH): (g_ui32EventStatus&= (~GED_EVENT_TOUCH));            
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT:
+                       (bSwitch)? (g_ui32EventStatus|=GED_EVENT_THERMAL): (g_ui32EventStatus&= (~GED_EVENT_THERMAL));
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_WFD_EVENT:
+                       (bSwitch)? (g_ui32EventStatus|=GED_EVENT_WFD): (g_ui32EventStatus&= (~GED_EVENT_WFD));
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_MHL_EVENT:
+                       (bSwitch)? (g_ui32EventStatus|=GED_EVENT_MHL): (g_ui32EventStatus&= (~GED_EVENT_MHL));
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_GAS_EVENT:
+                       (bSwitch)? (g_ui32EventStatus|=GED_EVENT_GAS): (g_ui32EventStatus&= (~GED_EVENT_GAS));
+                       ged_dvfs_probe_signal(GED_GAS_SIGNAL_EVENT);
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_LOW_POWER_MODE_EVENT:
+                       (bSwitch) ? (g_ui32EventStatus |= GED_EVENT_LOW_POWER_MODE) : (g_ui32EventStatus &= (~GED_EVENT_LOW_POWER_MODE));
+                       ged_dvfs_probe_signal(GED_LOW_POWER_MODE_SIGNAL_EVENT);
+                       break;
+               case GED_DVFS_VSYNC_OFFSET_MHL4K_VID_EVENT:
+                       (bSwitch) ? (g_ui32EventStatus |= GED_EVENT_MHL4K_VID) : (g_ui32EventStatus &= (~GED_EVENT_MHL4K_VID));
+                       ged_dvfs_probe_signal(GED_MHL4K_VID_SIGNAL_EVENT);
+                       break;
+               default:
+                       GED_LOGE("%s: unsupported event: %u\n", __func__, eEvent);
+                       goto CHECK_OUT;
+       }
+
+       if(ui32BeforeSwitchInterpret != g_ui32EventStatus || ui32BeforeDebugInterpret != g_ui32EventDebugStatus 
+                       || g_ui32EventDebugStatus&GED_EVENT_NOT_SYNC)
+       {
+               ged_dvfs_probe_signal(GED_DVFS_VSYNC_OFFSET_SIGNAL_EVENT);
+       }
+
+CHECK_OUT:    
+       mutex_unlock(&gsVSyncOffsetLock);
+}
+
+void ged_dvfs_vsync_offset_level_set(int i32level)
+{
+       g_VsyncOffsetLevel = i32level;
+}
+
+int ged_dvfs_vsync_offset_level_get(void)
+{
+       return g_VsyncOffsetLevel;
+}
+
+
+GED_ERROR ged_dvfs_um_commit( unsigned long gpu_tar_freq, bool bFallback)
+{
+#ifdef ENABLE_COMMON_DVFS    
+       int i32MaxLevel = 0;
+       unsigned int ui32NewFreqID;
+       int i;
+       unsigned long gpu_freq = 0; /* stays 0 when GED_DVFS_ENABLE is off, so it is never read uninitialized below */
+
+
+#ifdef GED_DVFS_ENABLE
+       unsigned int ui32CurFreqID;
+       i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
+       ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
+#endif  
+       if(g_gpu_timer_based_emu)
+       {
+               return GED_INTENTIONAL_BLOCK;
+       }
+
+#ifdef GED_DVFS_UM_CAL
+       mutex_lock(&gsDVFSLock);
+
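+       /*
+        * Re-normalize the loading over the whole DVFS window: the sampled
+        * loading is weighted by its own sample interval, the accumulated
+        * fully-busy time (g_ulWorkingPeriod_us) is weighted at 100%, and
+        * the sum is divided by the time since the previous DVFS pass.
+        * Worked example: 50% over a 10000us interval plus 2000us of busy
+        * work in a 12000us window -> (50*10000 + 100*2000) / 12000 = 58.
+        */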
+       if(g_ulCalResetTS_us  - g_ulPreDVFS_TS_us !=0)
+               gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us)  ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us  - g_ulPreDVFS_TS_us); 
+       else
+               gpu_loading =0 ;
+       gpu_pre_loading = gpu_av_loading;
+       gpu_av_loading = gpu_loading;
+
+       g_ulPreDVFS_TS_us = g_ulCalResetTS_us;        
+
+       if(gpu_tar_freq&0x1) // Magic to kill ged_srv
+       {
+               ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
+       }
+
+       if(bFallback==true) // in the fallback mode, gpu_tar_freq is taken as a frequency index
+       {
+               ged_dvfs_policy(gpu_loading, &ui32NewFreqID, 0, 0, 0, true);  
+       }
+       else
+       {
+               // Search suitable frequency level
+               ui32NewFreqID = i32MaxLevel;
+               for (i = 0; i <= i32MaxLevel; i++)
+               {
+#ifdef GED_DVFS_ENABLE
+                       gpu_freq = mt_gpufreq_get_freq_by_idx(i);
+#endif
+                       if (gpu_tar_freq > gpu_freq)
+                       {
+                               if(i==0)
+                                       ui32NewFreqID = 0;
+                               else
+                                       ui32NewFreqID = i-1;
+                               break;
+                       }
+               }
+       }    
+
+
+
+
+       if(g_eTuningMode==GED_DVFS_LP)
+       {
+               if(ui32NewFreqID!=i32MaxLevel && bFallback==GED_FALSE)
+               {
+                       ui32NewFreqID++;
+               }            
+               ged_monitor_3D_fence_set_disable(GED_TRUE);
+       }
+       else
+               ged_monitor_3D_fence_set_disable(GED_FALSE);
+
+
+       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] rdy to commit (%u)",ui32NewFreqID);
+
+       g_computed_freq_id = ui32NewFreqID;
+       ged_dvfs_gpu_freq_commit(ui32NewFreqID, GED_DVFS_DEFAULT_COMMIT);
+
+       g_ulWorkingPeriod_us = 0;
+
+       mutex_unlock(&gsDVFSLock);            
+#endif
+#else
+       gpu_pre_loading = 0;
+#endif
+
+       return GED_OK;
+}
+
+
+static bool ged_dvfs_policy(
+               unsigned int ui32GPULoading, unsigned int* pui32NewFreqID, 
+               unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed)
+{
+#ifdef GED_DVFS_ENABLE    
+       int i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
+       /* note: this holds the current frequency *index*, not a frequency */
+       unsigned int ui32GPUFreq = mt_gpufreq_get_cur_freq_index();
+
+       int i32NewFreqID = (int)ui32GPUFreq;
+
+       if(false==bRefreshed)
+       {
+               if(g_ulCalResetTS_us  - g_ulPreDVFS_TS_us !=0)
+                       gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us)  ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us  - g_ulPreDVFS_TS_us); 
+               else
+                       gpu_loading = 0;
+               g_ulPreDVFS_TS_us = g_ulCalResetTS_us;        
+
+               gpu_pre_loading = gpu_av_loading;
+               ui32GPULoading = gpu_loading;
+               gpu_av_loading = gpu_loading;
+       }
+
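+       /*
+        * Loading ladder (index 0 is the fastest level):
+        *   >= 99% -> jump to index 0 (max freq)
+        *   <=  1% -> jump to the slowest level
+        *   >= 85% -> speed up two levels  (index -= 2)
+        *   <= 30% -> slow down two levels (index += 2)
+        *   >= 70% -> speed up one level   (index -= 1)
+        *   <= 50% -> slow down one level  (index += 1)
+        *   otherwise hold. The momentum adjustment below adds one more
+        *   step when the loading changed by more than 1.7x versus the
+        *   previous pass, and the result is clamped to the table range.
+        */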
+       if (ui32GPULoading >= 99)
+       {
+               i32NewFreqID = 0;
+       }
+       else if (ui32GPULoading <= 1)
+       {
+               i32NewFreqID = i32MaxLevel;
+       }
+       else if (ui32GPULoading >= 85)
+       {
+               i32NewFreqID -= 2;
+       }
+       else if (ui32GPULoading <= 30)
+       {
+               i32NewFreqID += 2;
+       }
+       else if (ui32GPULoading >= 70)
+       {
+               i32NewFreqID -= 1;
+       }
+       else if (ui32GPULoading <= 50)
+       {
+               i32NewFreqID += 1;
+       }
+
+       if (i32NewFreqID < (int)ui32GPUFreq) /* cast avoids unsigned promotion when i32NewFreqID is negative */
+       {
+               if (gpu_pre_loading * 17 / 10 < ui32GPULoading)
+               {
+                       i32NewFreqID -= 1;
+               }
+       }
+       else if (i32NewFreqID > (int)ui32GPUFreq)
+       {
+               if (ui32GPULoading * 17 / 10 < gpu_pre_loading)
+               {
+                       i32NewFreqID += 1;
+               }
+       }
+
+       if (i32NewFreqID > i32MaxLevel)
+       {
+               i32NewFreqID = i32MaxLevel;
+       }
+       else if (i32NewFreqID < 0)
+       {
+               i32NewFreqID = 0;
+       }
+
+       *pui32NewFreqID = (unsigned int)i32NewFreqID;
+
+       g_ulWorkingPeriod_us = 0;
+
+       return *pui32NewFreqID != ui32GPUFreq ? GED_TRUE : GED_FALSE;
+#else
+       return GED_FALSE;
+#endif    
+}
+
+
+static void ged_dvfs_freq_input_boostCB(unsigned int ui32BoostFreqID)
+{
+#ifdef GED_DVFS_ENABLE
+       if (0 < g_iSkipCount)
+       {
+               return;
+       }
+
+       if (boost_gpu_enable == 0)
+       {
+               return;
+       }
+
+       mutex_lock(&gsDVFSLock);
+
+       if (ui32BoostFreqID < mt_gpufreq_get_cur_freq_index())
+       {
+               if (ged_dvfs_gpu_freq_commit(ui32BoostFreqID,GED_DVFS_INPUT_BOOST_COMMIT ))
+               {
+                       g_dvfs_skip_round = GED_DVFS_SKIP_ROUNDS; /* TODO: hard-coded skip count, must be fixed/tuned */
+               }
+       }
+
+       mutex_unlock(&gsDVFSLock);
+#endif    
+}
+
+#ifdef GED_DVFS_ENABLE
+static void ged_dvfs_freq_thermal_limitCB(unsigned int ui32LimitFreqID)
+{
+
+       if (0 < g_iSkipCount)
+       {
+               return;
+       }
+
+       if(ui32LimitFreqID == 0) // thermal event disable
+               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_FALSE);
+       else
+               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_TRUE);
+
+       mutex_lock(&gsDVFSLock);
+
+       if (ui32LimitFreqID > mt_gpufreq_get_cur_freq_index())
+       {
+               if (ged_dvfs_gpu_freq_commit(ui32LimitFreqID, GED_DVFS_SET_LIMIT_COMMIT))
+               {
+                       g_dvfs_skip_round = GED_DVFS_SKIP_ROUNDS; /* TODO: hard-coded skip count, must be fixed/tuned */
+               }
+       }
+
+       mutex_unlock(&gsDVFSLock);
+
+}
+#endif      
+
+void ged_dvfs_boost_gpu_freq(void)
+{
+       if (gpu_debug_enable)
+       {
+               GED_LOGE("%s", __func__);
+       }
+       ged_dvfs_freq_input_boostCB(0);
+}
+
+#ifdef GED_DVFS_ENABLE
+static void ged_dvfs_set_bottom_gpu_freq(unsigned int ui32FreqLevel)
+{
+       unsigned int ui32MaxLevel;
+       if (gpu_debug_enable)
+       {
+               GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel);
+       }
+
+       ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+       if (ui32MaxLevel < ui32FreqLevel)
+       {
+               ui32FreqLevel = ui32MaxLevel;
+       }
+
+       mutex_lock(&gsDVFSLock);
+
+       // 0 => The highest frequency
+       // table_num - 1 => The lowest frequency
+       g_bottom_freq_id = ui32MaxLevel - ui32FreqLevel;
+
+       gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);
+
+       /* if the current index is larger, i.e. a lower freq, reflect the new bottom immediately */
+       if(g_bottom_freq_id < mt_gpufreq_get_cur_freq_index()) 
+               ged_dvfs_gpu_freq_commit(g_bottom_freq_id, GED_DVFS_SET_BOTTOM_COMMIT);
+
+       mutex_unlock(&gsDVFSLock);
+
+}
+
+
+static unsigned int ged_dvfs_get_gpu_freq_level_count(void)
+{
+
+       return mt_gpufreq_get_dvfs_table_num();
+
+}
+
+
+static void ged_dvfs_custom_boost_gpu_freq(unsigned int ui32FreqLevel)
+{
+       unsigned int ui32MaxLevel;
+
+       if (gpu_debug_enable)
+       {
+               GED_LOGE("%s: freq = %d", __func__ ,ui32FreqLevel);
+       }
+
+       ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+       if (ui32MaxLevel < ui32FreqLevel)
+       {
+               ui32FreqLevel = ui32MaxLevel;
+       }
+
+       mutex_lock(&gsDVFSLock);
+
+       // 0 => The highest frequency
+       // table_num - 1 => The lowest frequency
+       g_cust_boost_freq_id = ui32MaxLevel - ui32FreqLevel;
+
+       gpu_cust_boost_freq = mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);
+
+
+       if (g_cust_boost_freq_id < mt_gpufreq_get_cur_freq_index())
+       {
+               ged_dvfs_gpu_freq_commit(g_cust_boost_freq_id, GED_DVFS_CUSTOM_BOOST_COMMIT);
+       }
+
+       mutex_unlock(&gsDVFSLock);
+
+}
+
+
+static void ged_dvfs_custom_ceiling_gpu_freq(unsigned int ui32FreqLevel)
+{
+
+       unsigned int ui32MaxLevel;
+
+       if (gpu_debug_enable)
+       {
+               GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel);
+       }
+
+       ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+       if (ui32MaxLevel < ui32FreqLevel)
+       {
+               ui32FreqLevel = ui32MaxLevel;
+       }
+
+       mutex_lock(&gsDVFSLock);
+
+       // 0 => The highest frequency
+       // table_num - 1 => The lowest frequency
+       g_cust_upbound_freq_id = ui32MaxLevel - ui32FreqLevel;
+
+       gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);
+
+
+       if (g_cust_upbound_freq_id > mt_gpufreq_get_cur_freq_index())
+       {
+               ged_dvfs_gpu_freq_commit(g_cust_upbound_freq_id, GED_DVFS_CUSTOM_CEIL_COMMIT);
+       }
+
+       mutex_unlock(&gsDVFSLock);
+
+}
+#endif        
+
+unsigned int ged_dvfs_get_custom_boost_gpu_freq(void)
+{
+#ifdef GED_DVFS_ENABLE
+       unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+#else    
+       unsigned int ui32MaxLevel = 0;
+#endif           
+       return ui32MaxLevel - g_cust_boost_freq_id;
+}
+
+
+
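+/*
+ * Fold the busy time since the last reset into g_ulWorkingPeriod_us:
+ * elapsed_us * loading / 100 is the fully-busy-equivalent share of the
+ * elapsed window, e.g. 4000us at 25% loading contributes 1000us.
+ */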
+void ged_dvfs_cal_gpu_utilization_force(void)
+{
+       unsigned int loading;
+       unsigned int block;
+       unsigned int idle;
+       unsigned long long t;
+       unsigned long ulwork;
+
+
+       t = ged_get_time();
+
+       do_div(t,1000);
+
+       ged_dvfs_cal_gpu_utilization(&loading, &block, &idle);
+
+       ulwork = (( t - g_ulCalResetTS_us ) * loading );
+       do_div(ulwork, 100);
+
+       g_ulWorkingPeriod_us += ulwork;
+
+       g_ulPreCalResetTS_us = g_ulCalResetTS_us;
+       g_ulCalResetTS_us = t;
+}
+
+void ged_dvfs_run(unsigned long t, long phase, unsigned long ul3DFenceDoneTime)
+{
+       bool bError;    
+       //ged_profile_dvfs_record_SW_vsync(t, phase, ul3DFenceDoneTime);
+       mutex_lock(&gsDVFSLock);
+
+       //gpu_pre_loading = gpu_loading;
+
+       if (0 == gpu_dvfs_enable)
+       {
+               gpu_power = 0;
+               gpu_loading = 0;
+               gpu_block= 0;
+               gpu_idle = 0;
+               goto EXIT_ged_dvfs_run;         
+       }
+
+       // SKIP for keeping boost freq
+       if(g_dvfs_skip_round>0)
+       {
+               g_dvfs_skip_round--;
+               goto EXIT_ged_dvfs_run;                 
+       }               
+
+       if (g_iSkipCount > 0)
+       {
+               gpu_power = 0;
+               gpu_loading = 0;
+               gpu_block= 0;
+               gpu_idle = 0;
+               g_iSkipCount -= 1;
+       }
+       else
+       {
+               g_ulPreCalResetTS_us = g_ulCalResetTS_us;
+               g_ulCalResetTS_us = t;
+               bError=ged_dvfs_cal_gpu_utilization(&gpu_loading, &gpu_block, &gpu_idle);
+
+#ifdef GED_DVFS_UM_CAL        
+               if(GED_DVFS_FALLBACK==phase) // timer-based DVFS use only
+#endif             
+               {
+
+                       if (ged_dvfs_policy(gpu_loading, &g_ui32FreqIDFromPolicy, t, phase, ul3DFenceDoneTime, false))
+                       {
+                               g_computed_freq_id = g_ui32FreqIDFromPolicy;
+                               ged_dvfs_gpu_freq_commit(g_ui32FreqIDFromPolicy, GED_DVFS_DEFAULT_COMMIT);
+                       }
+
+               }
+
+       }
+
+       if(gpu_debug_enable)
+       {
+#ifdef GED_DVFS_ENABLE 
+               GED_LOGE("%s:gpu_loading=%d %d, g_iSkipCount=%d",__func__, gpu_loading, mt_gpufreq_get_cur_freq_index(), g_iSkipCount);
+#endif        
+       }
+
+EXIT_ged_dvfs_run:
+       mutex_unlock(&gsDVFSLock);
+}
+
+#ifdef GED_DVFS_ENABLE 
+static unsigned int ged_dvfs_get_bottom_gpu_freq(void)
+{
+
+       unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+
+
+       return ui32MaxLevel - g_bottom_freq_id;
+}
+
+
+static unsigned int ged_dvfs_get_custom_ceiling_gpu_freq(void)
+{
+
+       unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
+
+       return ui32MaxLevel - g_cust_upbound_freq_id;
+}
+
+void ged_dvfs_sw_vsync_query_data(GED_DVFS_UM_QUERY_PACK* psQueryData)
+{
+       psQueryData->ui32GPULoading = gpu_loading;
+       psQueryData->ui32GPUFreqID =  mt_gpufreq_get_cur_freq_index();
+       psQueryData->gpu_cur_freq = mt_gpufreq_get_freq_by_idx(psQueryData->ui32GPUFreqID) ;
+       psQueryData->gpu_pre_freq = g_ui32PreFreqID;
+       psQueryData->nsOffset = ged_dvfs_vsync_offset_level_get();
+
+       psQueryData->ulWorkingPeriod_us = g_ulWorkingPeriod_us;
+       psQueryData->ulPreCalResetTS_us = g_ulPreCalResetTS_us;
+
+}
+
+#endif
+
+unsigned int ged_dvfs_get_gpu_loading(void)
+{
+       return gpu_av_loading;
+}
+
+unsigned int ged_dvfs_get_gpu_blocking(void)
+{
+       return gpu_block;
+}
+
+
+unsigned int ged_dvfs_get_gpu_idle(void)
+{
+       return 100 - gpu_av_loading;
+}
+
+void ged_dvfs_get_gpu_cur_freq(GED_DVFS_FREQ_DATA* psData)
+{
+#ifdef GED_DVFS_ENABLE    
+       psData->ui32Idx = mt_gpufreq_get_cur_freq_index();    
+       psData->ulFreq = mt_gpufreq_get_freq_by_idx(psData->ui32Idx);
+#endif    
+}
+
+void ged_dvfs_get_gpu_pre_freq(GED_DVFS_FREQ_DATA* psData)
+{
+#ifdef GED_DVFS_ENABLE    
+       psData->ui32Idx = g_ui32PreFreqID;
+       psData->ulFreq = mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
+#endif    
+}
+
+
+
+
+
+void ged_dvfs_probe_signal(int signo)
+{
+       int cache_pid = GED_NO_UM_SERVICE;
+       struct task_struct *t = NULL;
+       struct siginfo info;
+
+       /* zero the whole siginfo so no uninitialized stack data is sent */
+       memset(&info, 0, sizeof(info));
+       info.si_signo = signo;
+       info.si_code = SI_QUEUE;
+       info.si_int = 1234;
+
+       if(cache_pid!=g_probe_pid)
+       {
+               cache_pid = g_probe_pid;
+               if(g_probe_pid==GED_NO_UM_SERVICE)
+                       t = NULL;
+               else
+                       t = pid_task(find_vpid(g_probe_pid), PIDTYPE_PID); 
+       }
+
+       if(t!=NULL)
+       {
+               send_sig_info(signo, &info, t);
+               ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] send signo %d to ged_srv [%d]",signo, g_probe_pid);
+       }
+       else
+       {
+               g_probe_pid = GED_NO_UM_SERVICE;
+               ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] ged_srv not running");
+       }
+
+
+}
+
+void set_target_fps(int i32FPS)
+{
+       g_ulvsync_period = get_ns_period_from_fps(i32FPS);
+
+}
+
+#ifdef GED_DVFS_ENABLE
+unsigned long ged_gas_query_mode(void)
+{
+       if (g_ui32EventStatus & GED_EVENT_GAS)
+               return GAS_CATEGORY_OTHERS;
+       else
+               return GAS_CATEGORY_GAME;
+}
+#endif
+
+GED_ERROR ged_dvfs_probe(int pid)
+{
+       // would block here until a vsync releases it
+       //wait_for_completion(&gsVSyncOffsetLock);
+
+       if(GED_VSYNC_OFFSET_NOT_SYNC ==pid)
+       {
+               g_ui32EventDebugStatus |= GED_EVENT_NOT_SYNC;
+               return GED_OK;
+       }
+
+       if(GED_VSYNC_OFFSET_SYNC ==pid)
+       {
+               g_ui32EventDebugStatus &= (~GED_EVENT_NOT_SYNC);
+               return GED_OK;
+       }
+
+       g_probe_pid = pid;
+
+       /* clear bits among start */
+       if(g_probe_pid!=GED_NO_UM_SERVICE)
+       {
+               g_ui32EventStatus &= (~GED_EVENT_TOUCH);
+               g_ui32EventStatus &= (~GED_EVENT_WFD);
+               g_ui32EventStatus &= (~GED_EVENT_GAS);
+
+               g_ui32EventDebugStatus = 0;
+       }
+
+       ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] ged_srv pid: %d",g_probe_pid);
+
+       return GED_OK;
+}
+
+GED_ERROR ged_dvfs_system_init(void)
+{
+       mutex_init(&gsDVFSLock);
+       mutex_init(&gsVSyncOffsetLock);
+
+       // initially locked; signaled from vsync_sw_notify
+
+       g_iSkipCount = MTK_DEFER_DVFS_WORK_MS / MTK_DVFS_SWITCH_INTERVAL_MS;
+
+       g_ulvsync_period = get_ns_period_from_fps(60);
+
+
+#ifdef GED_DVFS_ENABLE
+       gpu_dvfs_enable = 1;
+#else
+       gpu_dvfs_enable = 0;
+#endif 
+
+       g_dvfs_skip_round = 0;
+
+#ifdef GED_DVFS_ENABLE 
+       g_bottom_freq_id = mt_gpufreq_get_dvfs_table_num() - 1; 
+       gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);
+
+       g_cust_boost_freq_id = mt_gpufreq_get_dvfs_table_num() - 1;
+       gpu_cust_boost_freq = mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);
+
+       g_cust_upbound_freq_id = 0;
+       gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);
+
+
+
+       // GPU HAL fp mount     
+       //mt_gpufreq_input_boost_notify_registerCB(ged_dvfs_freq_input_boostCB); // MTKFreqInputBoostCB
+       mt_gpufreq_power_limit_notify_registerCB(ged_dvfs_freq_thermal_limitCB); // MTKFreqPowerLimitCB
+       mtk_boost_gpu_freq_fp = ged_dvfs_boost_gpu_freq;
+       mtk_set_bottom_gpu_freq_fp = ged_dvfs_set_bottom_gpu_freq;
+       mtk_get_bottom_gpu_freq_fp = ged_dvfs_get_bottom_gpu_freq;
+       mtk_custom_get_gpu_freq_level_count_fp = ged_dvfs_get_gpu_freq_level_count;
+       mtk_custom_boost_gpu_freq_fp = ged_dvfs_custom_boost_gpu_freq;
+       mtk_custom_upbound_gpu_freq_fp = ged_dvfs_custom_ceiling_gpu_freq;
+       mtk_get_custom_boost_gpu_freq_fp = ged_dvfs_get_custom_boost_gpu_freq;
+       mtk_get_custom_upbound_gpu_freq_fp = ged_dvfs_get_custom_ceiling_gpu_freq;
+       mtk_get_gpu_loading_fp = ged_dvfs_get_gpu_loading;
+       mtk_get_gpu_block_fp = ged_dvfs_get_gpu_blocking;
+       mtk_get_gpu_idle_fp = ged_dvfs_get_gpu_idle;
+       mtk_do_gpu_dvfs_fp = ged_dvfs_run;
+       mtk_gpu_dvfs_set_mode_fp = ged_dvfs_set_tuning_mode_wrap;
+#endif 
+
+       return GED_OK;
+}
+
+void ged_dvfs_system_exit(void)
+{
+       mutex_destroy(&gsDVFSLock);
+       mutex_destroy(&gsVSyncOffsetLock);
+
+}
+
+#ifdef ENABLE_COMMON_DVFS      
+module_param(gpu_loading, uint, 0644);
+module_param(gpu_block, uint, 0644);
+module_param(gpu_idle, uint, 0644);
+module_param(gpu_dvfs_enable, uint, 0644);
+module_param(boost_gpu_enable, uint, 0644);
+module_param(gpu_debug_enable, uint, 0644);
+module_param(gpu_bottom_freq, uint, 0644);
+module_param(gpu_cust_boost_freq, uint, 0644);
+module_param(gpu_cust_upbound_freq, uint, 0644);
+module_param(g_gpu_timer_based_emu, uint, 0644);
+#endif 
+
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_hal.c b/drivers/misc/mediatek/gpu/ged/src/ged_hal.c
new file mode 100644 (file)
index 0000000..a05eb5c
--- /dev/null
@@ -0,0 +1,1005 @@
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/genalloc.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+//#include <linux/xlog.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <mt-plat/mtk_gpu_utility.h>
+
+#include "ged_base.h"
+#include "ged_hal.h"
+#include "ged_debugFS.h"
+
+#include "ged_dvfs.h"
+
+#include "ged_notify_sw_vsync.h"
+
+static struct dentry* gpsHALDir = NULL;
+static struct dentry* gpsTotalGPUFreqLevelCountEntry = NULL;
+static struct dentry* gpsCustomBoostGPUFreqEntry = NULL;
+static struct dentry* gpsCustomUpboundGPUFreqEntry = NULL;
+static struct dentry* gpsVsyncOffsetLevelEntry = NULL;
+static struct dentry* gpsVsyncOffsetEnableEntry = NULL;
+static struct dentry* gpsDvfsTuningModeEntry = NULL;
+static struct dentry* gpsDvfsCurFreqEntry = NULL;
+static struct dentry* gpsDvfsPreFreqEntry = NULL;
+static struct dentry* gpsDvfsGpuUtilizationEntry = NULL;
+static struct dentry* gpsFpsUpperBoundEntry = NULL;
+static struct dentry* gpsIntegrationReportReadEntry = NULL;
+
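+/*
+ * Splits a space-delimited buffer in place: each token's start offset is
+ * stored in pi32IndexArray and the delimiting spaces are overwritten with
+ * NULs, so pcSrc + pi32IndexArray[n] is a C string per token. Example:
+ * "touch_down 1" yields offsets {0, 11}, i.e. "touch_down" and "1".
+ * Returns the number of tokens found, up to i32NumToken.
+ */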
+int tokenizer(char* pcSrc, int i32len, int* pi32IndexArray, int i32NumToken)
+{
+       int i = 0;
+       int j = 0;
+       int head = -1;
+
+       for( ;i<i32len;i++)
+       {
+               if(pcSrc[i]!=' ')
+               {
+                       if(head==-1)
+                       {
+                               head = i;
+                       }
+               }
+               else
+               {
+                       if(head!=-1)
+                       {
+                               pi32IndexArray[j] = head;
+                               j++;
+                               if(j==i32NumToken)
+                                       return j;
+                               head = -1;
+                       }
+                       pcSrc[i] = 0;
+               }
+       }
+
+       if(head!=-1)
+       {
+               pi32IndexArray[j] = head;
+               j++;
+       }
+
+       /* return the number of tokens found; returning -1 here used to
+          drop tokens when the input ended with a trailing delimiter */
+       return j;
+}
+
+
+//-----------------------------------------------------------------------------
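+/*
+ * Note on the seq_file start/stop/next/show sets below: every node emits
+ * a single record, so start() returns SEQ_START_TOKEN only at position 0,
+ * next() always returns NULL, and show() runs exactly once per read.
+ */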
+static void* ged_total_gpu_freq_level_count_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_total_gpu_freq_level_count_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_total_gpu_freq_level_count_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static int ged_total_gpu_freq_level_count_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               unsigned int ui32FreqLevelCount;
+               if (false == mtk_custom_get_gpu_freq_level_count(&ui32FreqLevelCount))
+               {
+                       ui32FreqLevelCount = 0;
+               }
+               seq_printf(psSeqFile, "%u\n", ui32FreqLevelCount);
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsTotalGPUFreqLevelCountReadOps = 
+{
+       .start = ged_total_gpu_freq_level_count_seq_start,
+       .stop = ged_total_gpu_freq_level_count_seq_stop,
+       .next = ged_total_gpu_freq_level_count_seq_next,
+       .show = ged_total_gpu_freq_level_count_seq_show,
+};
+//-----------------------------------------------------------------------------
+static ssize_t ged_custom_boost_gpu_freq_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
+{
+#define GED_HAL_DEBUGFS_SIZE 64
+       char acBuffer[GED_HAL_DEBUGFS_SIZE];
+
+       int i32Value;
+
+       if ((0 < uiCount) && (uiCount < GED_HAL_DEBUGFS_SIZE))
+       {
+               if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               {
+                       acBuffer[uiCount] = '\0';
+                       if (sscanf(acBuffer, "%d", &i32Value) == 1)
+                       {
+                               if (i32Value < 0)
+                                       i32Value = 0;
+                               mtk_custom_boost_gpu_freq(i32Value);
+                       }
+                       //else if (...) //for other commands
+                       //{
+                       //}
+               }
+       }
+
+       return uiCount;
+}
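+/*
+ * Usage sketch (the path is an assumption; it depends on where the "hal"
+ * dir created in ged_hal_init lands in debugfs):
+ *     echo 3 > /sys/kernel/debug/.../hal/custom_boost_gpu_freq
+ * requests boost level 3; negative values are clamped to 0.
+ */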
+//-----------------------------------------------------------------------------
+static void* ged_custom_boost_gpu_freq_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_custom_boost_gpu_freq_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_custom_boost_gpu_freq_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static int ged_custom_boost_gpu_freq_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               unsigned int ui32BoostGpuFreqLevel;
+               if (false == mtk_get_custom_boost_gpu_freq(&ui32BoostGpuFreqLevel))
+               {
+                       ui32BoostGpuFreqLevel = 0;
+               }
+               seq_printf(psSeqFile, "%u\n", ui32BoostGpuFreqLevel);
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsCustomBoostGpuFreqReadOps = 
+{
+       .start = ged_custom_boost_gpu_freq_seq_start,
+       .stop = ged_custom_boost_gpu_freq_seq_stop,
+       .next = ged_custom_boost_gpu_freq_seq_next,
+       .show = ged_custom_boost_gpu_freq_seq_show,
+};
+//-----------------------------------------------------------------------------
+static ssize_t ged_custom_upbound_gpu_freq_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
+{
+#define GED_HAL_DEBUGFS_SIZE 64
+       char acBuffer[GED_HAL_DEBUGFS_SIZE];
+
+       int i32Value;
+
+       if ((0 < uiCount) && (uiCount < GED_HAL_DEBUGFS_SIZE))
+       {
+               if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               {
+                       acBuffer[uiCount] = '\0';
+                       if (sscanf(acBuffer, "%d", &i32Value) == 1)
+                       {
+                               if (i32Value < 0)
+                                       i32Value = 0;
+                               mtk_custom_upbound_gpu_freq(i32Value);
+                       }
+                       //else if (...) //for other commands
+                       //{
+                       //}
+               }
+       }
+
+       return uiCount;
+}
+//-----------------------------------------------------------------------------
+static void* ged_custom_upbound_gpu_freq_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_custom_upbound_gpu_freq_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_custom_upbound_gpu_freq_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static int ged_custom_upbound_gpu_freq_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               unsigned int ui32UpboundGpuFreqLevel;
+               if (false == mtk_get_custom_upbound_gpu_freq(&ui32UpboundGpuFreqLevel))
+               {
+                       ui32UpboundGpuFreqLevel = 0;
+                       seq_printf(psSeqFile, "call mtk_get_custom_upbound_gpu_freq false\n");
+               }
+               seq_printf(psSeqFile, "%u\n", ui32UpboundGpuFreqLevel);
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsCustomUpboundGpuFreqReadOps = 
+{
+       .start = ged_custom_upbound_gpu_freq_seq_start,
+       .stop = ged_custom_upbound_gpu_freq_seq_stop,
+       .next = ged_custom_upbound_gpu_freq_seq_next,
+       .show = ged_custom_upbound_gpu_freq_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+static bool bForce=GED_FALSE;
+static ssize_t ged_vsync_offset_enable_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
+{
+#define GED_HAL_DEBUGFS_SIZE 64
+#define NUM_TOKEN 2
+
+       /*
+        *  This node accepts only: [CMD] [NUM]
+        *  e.g. "touch_down 1"
+        *  Supported CMDs: touch_down and enable_WFD (ENABLE_COMMON_DVFS
+        *  only), enable_debug, gas, mhl4k-vid, low-power-mode
+        */
+
+       char acBuffer[GED_HAL_DEBUGFS_SIZE];
+       int aint32Indx[NUM_TOKEN];
+       char* pcCMD;
+       char* pcValue;
+       int i;
+
+
+
+       if ((0 < uiCount) && (uiCount < GED_HAL_DEBUGFS_SIZE))
+       {
+               if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               {
+                       acBuffer[uiCount] = '\0';
+                       i=tokenizer(acBuffer, uiCount, aint32Indx, NUM_TOKEN);
+                       if(i==NUM_TOKEN)
+                       {
+                               pcCMD = acBuffer+aint32Indx[0];
+
+                               pcValue = acBuffer+aint32Indx[1];
+#ifdef ENABLE_COMMON_DVFS                
+                               if(strcmp(pcCMD,"touch_down")==0)
+                               {
+                                       if ( (*pcValue)=='1'|| (*pcValue) =='0')
+                                       {
+                                               if( (*pcValue) -'0'==0) // touch up
+                                                       ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_TOUCH_EVENT , false);
+                                               else // touch down
+                                                       ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_TOUCH_EVENT , true);
+                                       }  
+                               }
+                               else if(strcmp(pcCMD,"enable_WFD")==0)
+                               {
+                                       if ( (*pcValue) =='1'|| (*pcValue) =='0')
+                                       {
+                                               if( (*pcValue) -'0'==0) // WFD turn-off
+                                                       ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_WFD_EVENT , false);
+                                               else // WFD turn-on
+                                                       ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_WFD_EVENT , true);
+                                       }
+                               }
+                               else 
+#endif                    
+                                       if(strcmp(pcCMD,"enable_debug")==0)
+                                       {
+                                               if ( (*pcValue) =='1'|| (*pcValue) =='0'||(*pcValue) =='2')
+                                               {
+                                                       if( (*pcValue) -'0'==1) // force off
+                                                       {
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_FORCE_OFF , true);
+                                                               bForce = GED_FALSE;
+                                                       }
+                                                       else if( (*pcValue) -'0'==2) // force on
+                                                       {
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_FORCE_ON , true);
+                                                               bForce = GED_TRUE;
+                                                       }
+                                                       else // turn-off debug
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_DEBUG_CLEAR_EVENT , true);
+                                               }
+                                       }
+                                       else if(strcmp(pcCMD, "gas") == 0)
+                                       {
+                                               if ( (*pcValue) =='1'|| (*pcValue) =='0')
+                                               {
+                                                       if( (*pcValue) -'0'==0)
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_GAS_EVENT, false);
+                                                       else
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_GAS_EVENT, true);
+                                               }
+                                       }
+                                       else if (strcmp(pcCMD, "mhl4k-vid") == 0)
+                                       {
+                                               if ((*pcValue) == '1'|| (*pcValue) == '0')
+                                               {
+                                                       if ((*pcValue) -'0' == 0)
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_MHL4K_VID_EVENT, false);
+                                                       else
+                                                               ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_MHL4K_VID_EVENT, true);
+                                               }
+                                       }
+                                       else if (strcmp(pcCMD, "low-power-mode") == 0)
+                                        {
+                                                if ((*pcValue) == '1'|| (*pcValue) == '0')
+                                                {
+                                                        if ((*pcValue) -'0' == 0)
+                                                                ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_LOW_POWER_MODE_EVENT, false);
+                                                        else
+                                                                ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_LOW_POWER_MODE_EVENT, true);
+                                                }
+                                       }
+                                       else
+                                       {
+                                               GED_LOGE("unknown command: %s %c", pcCMD, *pcValue);
+                                       }
+                       }
+
+               }
+       }
+       return uiCount;
+
+}
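+/*
+ * Usage sketch for the event_notify node (same path caveat as the other
+ * hal entries):
+ *     echo "touch_down 1"   -> assert the touch event (boosts GPU freq)
+ *     echo "enable_debug 2" -> force the vsync offset on
+ *     echo "gas 0"          -> clear the GAS event
+ */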
+//-----------------------------------------------------------------------------
+static void* ged_vsync_offset_enable_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_vsync_offset_enable_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_vsync_offset_enable_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+
+
+extern unsigned int g_ui32EventStatus; 
+extern unsigned int g_ui32EventDebugStatus;
+
+static int ged_vsync_offset_enable_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               seq_printf(psSeqFile, "g_ui32EventStatus =%x\n",g_ui32EventStatus);
+               seq_printf(psSeqFile, "g_ui32EventDebugStatus =%x\n",g_ui32EventDebugStatus);
+               if( g_ui32EventDebugStatus&GED_EVENT_FORCE_ON )
+               {
+                       seq_printf(psSeqFile, "Debug mode: Force on\n");
+               }
+               else if ( g_ui32EventDebugStatus&GED_EVENT_FORCE_OFF )
+               {
+                       seq_printf(psSeqFile, "Debug mode: Force off\n");
+               }
+               else
+               {
+                       seq_printf(psSeqFile, "Touch: %d\n",  g_ui32EventStatus&GED_EVENT_TOUCH?1:0 );
+                       seq_printf(psSeqFile, "WFD: %d\n",  g_ui32EventStatus&GED_EVENT_WFD?1:0 );
+                       seq_printf(psSeqFile, "MHL: %d\n",  g_ui32EventStatus&GED_EVENT_MHL?1:0 );
+                       seq_printf(psSeqFile, "GAS: %d\n",  g_ui32EventStatus&GED_EVENT_GAS?1:0 );
+                       seq_printf(psSeqFile, "Thermal: %d\n", g_ui32EventStatus&GED_EVENT_THERMAL?1:0 );
+                       seq_printf(psSeqFile, "Low power mode: %d\n", g_ui32EventStatus & GED_EVENT_LOW_POWER_MODE ? 1 : 0);
+                       seq_printf(psSeqFile, "MHL4K Video: %d\n", g_ui32EventStatus & GED_EVENT_MHL4K_VID ? 1 : 0);
+               }
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsVsync_offset_enableReadOps = 
+{
+       .start = ged_vsync_offset_enable_seq_start,
+       .stop = ged_vsync_offset_enable_seq_stop,
+       .next = ged_vsync_offset_enable_seq_next,
+       .show = ged_vsync_offset_enable_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+//ged_dvfs_vsync_offset_level_set
+static ssize_t ged_vsync_offset_level_write_entry(
+               const char __user *pszBuffer,
+               size_t uiCount,
+               loff_t uiPosition,
+               void *pvData)
+{
+#define GED_HAL_DEBUGFS_SIZE 64
+#define NUM_TOKEN 2
+
+       /*
+        *  This node accepts only: [CMD] [NUM]
+        *  e.g. "set_vsync_offset 1"
+        */
+
+       char acBuffer[GED_HAL_DEBUGFS_SIZE];
+       int aint32Indx[NUM_TOKEN];
+       char* pcCMD;
+       char* pcValue;
+       int i;
+       int i32VsyncOffsetLevel;
+       int ret;
+
+       if (!((0 < uiCount) && (uiCount < GED_HAL_DEBUGFS_SIZE - 1)))
+               return 0;
+
+       if (ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               return 0;
+
+       acBuffer[uiCount] = '\n';
+       acBuffer[uiCount+1] = 0;
+       i=tokenizer(acBuffer, uiCount, aint32Indx, NUM_TOKEN);
+       GED_LOGE("i=%d",i);
+       if(i==NUM_TOKEN)
+       {
+               pcCMD = acBuffer+aint32Indx[0];
+
+               pcValue = acBuffer+aint32Indx[1];
+               if(strcmp(pcCMD,"set_vsync_offset")==0)
+               {
+                       ret = kstrtoint(pcValue, 0, &i32VsyncOffsetLevel);
+                       if (ret == 0)
+                               ged_dvfs_vsync_offset_level_set(i32VsyncOffsetLevel);
+               }
+       }
+
+       return uiCount;
+}
+
+//-----------------------------------------------------------------------------
+
+static void* ged_vsync_offset_level_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_vsync_offset_level_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_vsync_offset_level_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static int ged_vsync_offset_level_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               seq_printf(psSeqFile, "%d\n", ged_dvfs_vsync_offset_level_get());
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsVsync_offset_levelReadOps = 
+{
+       .start = ged_vsync_offset_level_seq_start,
+       .stop = ged_vsync_offset_level_seq_stop,
+       .next = ged_vsync_offset_level_seq_next,
+       .show = ged_vsync_offset_level_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+
+static ssize_t ged_dvfs_tuning_mode_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
+{
+#define GED_HAL_DEBUGFS_SIZE 64
+       char acBuffer[GED_HAL_DEBUGFS_SIZE];
+
+
+       if ((0 < uiCount) && (uiCount < GED_HAL_DEBUGFS_SIZE))
+       {
+               if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               {
+                       unsigned int ui32Mode;
+                       acBuffer[uiCount] = '\0';
+                       /* scan into an unsigned int, not the enum itself, to keep sscanf well-defined */
+                       if (sscanf(acBuffer, "%u", &ui32Mode) == 1)
+                       {
+                               if (GED_DVFS_DEFAULT <= ui32Mode && ui32Mode <= GED_DVFS_PERFORMANCE)
+                                       ged_dvfs_set_tuning_mode((GED_DVFS_TUNING_MODE)ui32Mode);
+                       }
+                       //else if (...) //for other commands
+                       //{
+                       //}
+               }
+       }
+
+       return uiCount;
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_tuning_mode_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_dvfs_tuning_mode_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_tuning_mode_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+static int ged_dvfs_tuning_mode_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               GED_DVFS_TUNING_MODE eTuningMode;
+               eTuningMode = ged_dvfs_get_tuning_mode();
+               seq_printf(psSeqFile, "%u\n",eTuningMode);      
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsDvfs_tuning_mode_ReadOps = 
+{
+       .start = ged_dvfs_tuning_mode_seq_start,
+       .stop = ged_dvfs_tuning_mode_seq_stop,
+       .next = ged_dvfs_tuning_mode_seq_next,
+       .show = ged_dvfs_tuning_mode_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+static void* ged_dvfs_cur_freq_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_dvfs_cur_freq_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_cur_freq_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+static int ged_dvfs_cur_freq_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               GED_DVFS_FREQ_DATA sFreqInfo;
+               ged_dvfs_get_gpu_cur_freq(&sFreqInfo);
+               seq_printf(psSeqFile, "%u %lu\n", sFreqInfo.ui32Idx, sFreqInfo.ulFreq);
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsDvfs_cur_freq_ReadOps = 
+{
+       .start = ged_dvfs_cur_freq_seq_start,
+       .stop = ged_dvfs_cur_freq_seq_stop,
+       .next = ged_dvfs_cur_freq_seq_next,
+       .show = ged_dvfs_cur_freq_seq_show,
+};
+
+
+//-----------------------------------------------------------------------------
+
+static void* ged_dvfs_pre_freq_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_dvfs_pre_freq_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_pre_freq_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+static int ged_dvfs_pre_freq_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               GED_DVFS_FREQ_DATA sFreqInfo;
+               ged_dvfs_get_gpu_pre_freq(&sFreqInfo);
+               seq_printf(psSeqFile, "%u %lu\n", sFreqInfo.ui32Idx, sFreqInfo.ulFreq);
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsDvfs_pre_freq_ReadOps = 
+{
+       .start = ged_dvfs_pre_freq_seq_start,
+       .stop = ged_dvfs_pre_freq_seq_stop,
+       .next = ged_dvfs_pre_freq_seq_next,
+       .show = ged_dvfs_pre_freq_seq_show,
+};
+
+
+//-----------------------------------------------------------------------------
+
+static void* ged_dvfs_gpu_util_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_dvfs_gpu_util_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_gpu_util_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+static int ged_dvfs_gpu_util_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               seq_printf(psSeqFile, "%u %u %u\n",ged_dvfs_get_gpu_loading(),ged_dvfs_get_gpu_blocking(),ged_dvfs_get_gpu_idle());      
+       }
+
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsDvfs_gpu_util_ReadOps = 
+{
+       .start = ged_dvfs_gpu_util_seq_start,
+       .stop = ged_dvfs_gpu_util_seq_stop,
+       .next = ged_dvfs_gpu_util_seq_next,
+       .show = ged_dvfs_gpu_util_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+static uint32_t _fps_upper_bound = 60;
+
+static void *ged_fps_ub_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       return *pos ? NULL : SEQ_START_TOKEN;
+}
+
+static void ged_fps_ub_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *ged_fps_ub_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       return NULL;
+}
+
+static int ged_fps_ub_seq_show(struct seq_file *seq, void *v)
+{
+       printk("+%s", __func__);
+       seq_printf(seq, "%u\n", _fps_upper_bound);
+       return 0;
+}
+
+static struct seq_operations gs_fps_ub_read_ops =
+{
+       .start  = ged_fps_ub_seq_start,
+       .stop   = ged_fps_ub_seq_stop,
+       .next   = ged_fps_ub_seq_next,
+       .show   = ged_fps_ub_seq_show,
+};
+
+#define MAX_FPS_DIGITS 2
+static ssize_t ged_fps_ub_write(const char __user *pszBuffer, size_t uiCount,
+               loff_t uiPosition, void *pvData)
+{
+       char str_num[MAX_FPS_DIGITS + 1];
+       size_t uiLen = (uiCount < MAX_FPS_DIGITS) ? uiCount : MAX_FPS_DIGITS;
+
+       /* copy at most what the user actually wrote, to avoid reading past a short buffer */
+       if (0 == ged_copy_from_user(str_num, pszBuffer, uiLen))
+       {
+               str_num[uiLen] = 0;
+               _fps_upper_bound = simple_strtol(str_num, NULL, 10);
+               ged_dvfs_probe_signal(GED_FPS_CHANGE_SIGNAL_EVENT);
+               printk("GED: fps upper bound is set to %u\n", _fps_upper_bound);
+       }
+
+       return uiCount;
+}
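+/*
+ * Usage sketch: writing a decimal fps (only the first MAX_FPS_DIGITS
+ * characters are read, so two digits in practice) updates
+ * _fps_upper_bound and signals ged_srv with GED_FPS_CHANGE_SIGNAL_EVENT,
+ * e.g. echo 30 into the corresponding debugfs entry (its creation is
+ * outside this hunk).
+ */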
+
+//-----------------------------------------------------------------------------
+
+static void* ged_dvfs_integration_report_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+       if (0 == *puiPosition)
+       {
+               return SEQ_START_TOKEN;
+       }
+
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+static void ged_dvfs_integration_report_seq_stop(struct seq_file *psSeqFile, void *pvData)
+{
+
+}
+//-----------------------------------------------------------------------------
+static void* ged_dvfs_integration_report_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
+{
+       return NULL;
+}
+//-----------------------------------------------------------------------------
+
+extern void (*ged_dvfs_cal_gpu_utilization_fp)(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle) ;
+extern void (*ged_dvfs_gpu_freq_commit_fp)(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType, int* pbCommited) ;
+extern bool ged_gpu_power_on_notified;
+extern bool ged_gpu_power_off_notified;
+static int ged_dvfs_integration_report_seq_show(struct seq_file *psSeqFile, void *pvData)
+{
+       if (pvData != NULL)
+       {
+               seq_printf(psSeqFile, "GPU Utilization fp: %p\n", ged_dvfs_cal_gpu_utilization_fp);
+               seq_printf(psSeqFile, "GPU DVFS idx commit fp: %p\n", ged_dvfs_gpu_freq_commit_fp);
+               seq_printf(psSeqFile, "GPU clock notify on: %d\n", ged_gpu_power_on_notified);
+               seq_printf(psSeqFile, "GPU clock notify off: %d\n", ged_gpu_power_off_notified);
+       }
+       return 0;
+}
+//-----------------------------------------------------------------------------
+static struct seq_operations gsIntegrationReportReadOps = 
+{
+       .start = ged_dvfs_integration_report_seq_start,
+       .stop = ged_dvfs_integration_report_seq_stop,
+       .next = ged_dvfs_integration_report_seq_next,
+       .show = ged_dvfs_integration_report_seq_show,
+};
+//-----------------------------------------------------------------------------
+
+GED_ERROR ged_hal_init(void)
+{
+       GED_ERROR err = GED_OK;
+
+       err = ged_debugFS_create_entry_dir(
+                       "hal",
+                       NULL,
+                       &gpsHALDir);
+
+       if (unlikely(err != GED_OK))
+       {
+               err = GED_ERROR_FAIL;
+               GED_LOGE("ged: failed to create hal dir!\n");
+               goto ERROR;
+       }
+
+       /* Report the number of available GPU freq levels */
+       err = ged_debugFS_create_entry(
+                       "total_gpu_freq_level_count",
+                       gpsHALDir,
+                       &gsTotalGPUFreqLevelCountReadOps,
+                       NULL,
+                       NULL,
+                       &gpsTotalGPUFreqLevelCountEntry);
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create total_gpu_freq_level_count entry!\n");
+               goto ERROR;
+       }
+
+       /* Control the custom GPU freq boost (lower bound) */
+       err = ged_debugFS_create_entry(
+                       "custom_boost_gpu_freq",
+                       gpsHALDir,
+                       &gsCustomBoostGpuFreqReadOps,
+                       ged_custom_boost_gpu_freq_write_entry,
+                       NULL,
+                       &gpsCustomBoostGPUFreqEntry);
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create custom_boost_gpu_freq entry!\n");
+               goto ERROR;
+       }
+
+       /* Control the custom GPU freq upper bound */
+       err = ged_debugFS_create_entry(
+                       "custom_upbound_gpu_freq",
+                       gpsHALDir,
+                       &gsCustomUpboundGpuFreqReadOps,
+                       ged_custom_upbound_gpu_freq_write_entry,
+                       NULL,
+                       &gpsCustomUpboundGPUFreqEntry);
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create custom_upbound_gpu_freq entry!\n");
+               goto ERROR;
+       }
+
+       /* Enable/Disable the vsync offset */
+
+       err = ged_debugFS_create_entry(
+                       "event_notify",
+                       gpsHALDir,
+                       &gsVsync_offset_enableReadOps,
+                       ged_vsync_offset_enable_write_entry,
+                       NULL,
+                       &gpsVsyncOffsetEnableEntry);
+
+
+       /* Control the vsync offset level */
+
+       err = ged_debugFS_create_entry(
+                       "vsync_offset_level",
+                       gpsHALDir,
+                       &gsVsync_offset_levelReadOps,
+                       ged_vsync_offset_level_write_entry,
+                       NULL,
+                       &gpsVsyncOffsetLevelEntry);
+
+       /* Control the dvfs policy threshold level */
+
+       err = ged_debugFS_create_entry(
+                       "custom_dvfs_mode",
+                       gpsHALDir,
+                       &gsDvfs_tuning_mode_ReadOps, 
+                       ged_dvfs_tuning_mode_write_entry, 
+                       NULL,
+                       &gpsDvfsTuningModeEntry);
+
+
+       /* Get current GPU freq */
+
+       err = ged_debugFS_create_entry(
+                       "current_freqency",
+                       gpsHALDir,
+                       &gsDvfs_cur_freq_ReadOps, 
+                       NULL, 
+                       NULL,
+                       &gpsDvfsCurFreqEntry);
+
+       /* Get previous GPU freq */
+
+       err = ged_debugFS_create_entry(
+                       "previous_freqency",
+                       gpsHALDir,
+                       &gsDvfs_pre_freq_ReadOps, 
+                       NULL, 
+                       NULL,
+                       &gpsDvfsPreFreqEntry);
+
+       /* Get GPU Utilization */
+
+       err = ged_debugFS_create_entry(
+                       "gpu_utilization",
+                       gpsHALDir,
+                       &gsDvfs_gpu_util_ReadOps, 
+                       NULL, 
+                       NULL,
+                       &gpsDvfsGpuUtilizationEntry);
+
+       /* Get FPS upper bound */
+       err = ged_debugFS_create_entry(
+                       "fps_upper_bound",
+                       gpsHALDir,
+                       &gs_fps_ub_read_ops,
+                       ged_fps_ub_write,
+                       NULL,
+                       &gpsFpsUpperBoundEntry
+                       );
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create fps_upper_bound entry!\n");
+               goto ERROR;
+       }
+
+       /* Report Integration Status */
+       err = ged_debugFS_create_entry(
+                       "integration_report",
+                       gpsHALDir,
+                       &gsIntegrationReportReadOps,
+                       NULL,
+                       NULL,
+                       &gpsIntegrationReportReadEntry);
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create integration_report entry!\n");
+               goto ERROR;
+       }
+
+       return err;
+
+ERROR:
+
+       ged_hal_exit();
+
+       return err;
+}
+//-----------------------------------------------------------------------------
+void ged_hal_exit(void)
+{
+       ged_debugFS_remove_entry(gpsIntegrationReportReadEntry);
+       ged_debugFS_remove_entry(gpsFpsUpperBoundEntry);
+       ged_debugFS_remove_entry(gpsVsyncOffsetLevelEntry);
+       ged_debugFS_remove_entry(gpsDvfsTuningModeEntry);
+       ged_debugFS_remove_entry(gpsCustomUpboundGPUFreqEntry);
+       ged_debugFS_remove_entry(gpsCustomBoostGPUFreqEntry);
+       ged_debugFS_remove_entry(gpsVsyncOffsetEnableEntry);
+       ged_debugFS_remove_entry(gpsTotalGPUFreqLevelCountEntry);
+       ged_debugFS_remove_entry(gpsDvfsCurFreqEntry);
+       ged_debugFS_remove_entry(gpsDvfsPreFreqEntry);
+       ged_debugFS_remove_entry(gpsDvfsGpuUtilizationEntry);
+       ged_debugFS_remove_entry_dir(gpsHALDir);
+}
+//-----------------------------------------------------------------------------
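
A side note on error handling in ged_hal_init(): only total_gpu_freq_level_count, custom_boost_gpu_freq, custom_upbound_gpu_freq, fps_upper_bound and integration_report have their create results checked; the returns for event_notify, vsync_offset_level, custom_dvfs_mode, current_freqency, previous_freqency and gpu_utilization are overwritten untested. A hypothetical wrapper that would give every node the same check (the helper is illustrative, not part of the patch):

/* Sketch: uniform error handling for hal entries, wrapping the
 * ged_debugFS_create_entry() call pattern used above. */
static GED_ERROR ged_hal_create_checked(const char *pszName,
                struct seq_operations *psReadOps,
                ssize_t (*pfnWrite)(const char __user *, size_t, loff_t, void *),
                struct dentry **ppsEntry)
{
        GED_ERROR err = ged_debugFS_create_entry(pszName, gpsHALDir,
                        psReadOps, pfnWrite, NULL, ppsEntry);

        if (unlikely(err != GED_OK))
                GED_LOGE("ged: failed to create %s entry!\n", pszName);

        return err;
}
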
index 456ca364d8e96e1f5c09159e83c49dd418f193bc..0fbe3829fa7b6dcddcc86315a692de645c4d0ac3 100644 (file)
 
 typedef struct GED_HASHTABLE_TAG
 {
-    unsigned int        ui32Bits;
-    unsigned int        ui32Length;
-    unsigned int        ui32CurrentID;
-    unsigned int        ui32Count;
-    struct hlist_head*  psHashTable;
+       unsigned int        ui32Bits;
+       unsigned int        ui32Length;
+       unsigned int        ui32CurrentID;
+       unsigned int        ui32Count;
+       struct hlist_head*  psHashTable;
 } GED_HASHTABLE;
 
 typedef struct GED_HASHNODE_TAG
 {
-    unsigned int        ui32ID;
-    void*               pvoid;
-    struct hlist_node   sNode;
+       unsigned int        ui32ID;
+       void*               pvoid;
+       struct hlist_node   sNode;
 } GED_HASHNODE;
 
 #define GED_HASHTABLE_INIT_ID 1234 // 0 = invalid
 
 void* __ged_hashtable_find(struct hlist_head *head, unsigned int ui32ID)
 {
-    GED_HASHNODE* psHN;
-    hlist_for_each_entry_rcu(psHN, head, sNode) 
-    {
-        if (psHN->ui32ID == ui32ID)
-        {
-            return psHN;
-        }
-    }
-    return NULL;
+       GED_HASHNODE* psHN;
+       hlist_for_each_entry_rcu(psHN, head, sNode) 
+       {
+               if (psHN->ui32ID == ui32ID)
+               {
+                       return psHN;
+               }
+       }
+       return NULL;
 }
 
 static int ged_hash(GED_HASHTABLE_HANDLE hHashTable, unsigned int ui32ID)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    return hash_32(ui32ID, psHT->ui32Bits);
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       return hash_32(ui32ID, psHT->ui32Bits);
 }
 
 GED_HASHTABLE_HANDLE ged_hashtable_create(unsigned int ui32Bits)
 {
-    GED_HASHTABLE* psHT;
-    unsigned int i;
-
-    if (ui32Bits > 20)
-    {
-        // 1048576 slots !?
-        // Need to check the necessary
-        return NULL;
-    }
-
-    psHT = (GED_HASHTABLE*)ged_alloc(sizeof(GED_HASHTABLE));
-    if (psHT)
-    {
-        psHT->ui32Bits = ui32Bits;
-        psHT->ui32Length = 1 << ui32Bits;
-        psHT->ui32CurrentID = GED_HASHTABLE_INIT_ID; // 0 = invalid
-        psHT->psHashTable = (struct hlist_head*)ged_alloc(psHT->ui32Length * sizeof(struct hlist_head));
-        if (psHT->psHashTable)
-        {
-            for (i = 0; i < psHT->ui32Length; i++)
-            {
-                INIT_HLIST_HEAD(&psHT->psHashTable[i]);
-            }
-            return (GED_HASHTABLE_HANDLE)psHT;
-        }
-    }
-
-    ged_hashtable_destroy(psHT);
-    return NULL;
+       GED_HASHTABLE* psHT;
+       unsigned int i;
+
+       if (ui32Bits > 20)
+       {
+               // 2^20 = 1048576 slots; refuse until a table this
+               // large is shown to be necessary
+               return NULL;
+       }
+
+       psHT = (GED_HASHTABLE*)ged_alloc(sizeof(GED_HASHTABLE));
+       if (psHT)
+       {
+               psHT->ui32Bits = ui32Bits;
+               psHT->ui32Length = 1 << ui32Bits;
+               psHT->ui32CurrentID = GED_HASHTABLE_INIT_ID; // 0 = invalid
+               psHT->psHashTable = (struct hlist_head*)ged_alloc(psHT->ui32Length * sizeof(struct hlist_head));
+               if (psHT->psHashTable)
+               {
+                       for (i = 0; i < psHT->ui32Length; i++)
+                       {
+                               INIT_HLIST_HEAD(&psHT->psHashTable[i]);
+                       }
+                       return (GED_HASHTABLE_HANDLE)psHT;
+               }
+       }
+
+       ged_hashtable_destroy(psHT);
+       return NULL;
 }
 
 void ged_hashtable_destroy(GED_HASHTABLE_HANDLE hHashTable)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    if (psHT)
-    {
-        int i = 0;
-        while(psHT->ui32Count > 0)
-        {
-            unsigned int ui32ID = 0;
-            GED_HASHNODE* psHN;
-            // get one to be freed
-            for (;i < psHT->ui32Length; i++)
-            {
-                struct hlist_head *head = &psHT->psHashTable[i];
-                hlist_for_each_entry_rcu(psHN, head, sNode) 
-                {
-                    ui32ID = psHN->ui32ID;
-                    break;
-                }
-                if (0 < ui32ID)
-                {
-                    break;
-                }
-            }
-
-            if (i >= psHT->ui32Length)
-            {
-                break;
-            }
-
-            ged_hashtable_remove(psHT, ui32ID);
-        }
-
-        /* free the hash table */
-        ged_free(psHT->psHashTable, psHT->ui32Length * sizeof(struct hlist_head));
-        ged_free(psHT, sizeof(GED_HASHTABLE));
-    }
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       if (psHT)
+       {
+               int i = 0;
+               while(psHT->ui32Count > 0)
+               {
+                       unsigned int ui32ID = 0;
+                       GED_HASHNODE* psHN;
+                       // get one to be freed
+                       for (;i < psHT->ui32Length; i++)
+                       {
+                               struct hlist_head *head = &psHT->psHashTable[i];
+                               hlist_for_each_entry_rcu(psHN, head, sNode) 
+                               {
+                                       ui32ID = psHN->ui32ID;
+                                       break;
+                               }
+                               if (0 < ui32ID)
+                               {
+                                       break;
+                               }
+                       }
+
+                       if (i >= psHT->ui32Length)
+                       {
+                               break;
+                       }
+
+                       ged_hashtable_remove(psHT, ui32ID);
+               }
+
+               /* free the hash table */
+               ged_free(psHT->psHashTable, psHT->ui32Length * sizeof(struct hlist_head));
+               ged_free(psHT, sizeof(GED_HASHTABLE));
+       }
 }
 
 GED_ERROR ged_hashtable_insert(GED_HASHTABLE_HANDLE hHashTable, void* pvoid, unsigned int* pui32ID)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    GED_HASHNODE* psHN = NULL;
-    unsigned int ui32Hash, ui32ID;
-
-    if ((!psHT) || (!pui32ID))
-    {
-        return GED_ERROR_INVALID_PARAMS;
-    }
-
-    ui32ID = psHT->ui32CurrentID + 1;
-    while(1)
-    {
-        ui32Hash = ged_hash(psHT, ui32ID);
-        psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
-        if (psHN != NULL)
-        {
-            ui32ID++;
-            if (ui32ID == 0)//skip the value 0
-            {
-                ui32ID = 1;
-            }
-            if (ui32ID == psHT->ui32CurrentID)
-            {
-                return GED_ERROR_FAIL;
-            }
-        }
-        else
-        {
-            break;
-        }
-    };
-
-    psHN = (GED_HASHNODE*)ged_alloc(sizeof(GED_HASHNODE));
-    if (psHN)
-    {
-        psHN->pvoid = pvoid;
-        psHN->ui32ID = ui32ID;
-        psHT->ui32CurrentID = ui32ID;
-        *pui32ID = ui32ID;
-        hlist_add_head_rcu(&psHN->sNode, &psHT->psHashTable[ui32Hash]);
-        psHT->ui32Count += 1;
-        return GED_OK;
-    }
-
-    return GED_ERROR_OOM;
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       GED_HASHNODE* psHN = NULL;
+       unsigned int ui32Hash, ui32ID;
+
+       if ((!psHT) || (!pui32ID))
+       {
+               return GED_ERROR_INVALID_PARAMS;
+       }
+
+       ui32ID = psHT->ui32CurrentID + 1;
+       while(1)
+       {
+               ui32Hash = ged_hash(psHT, ui32ID);
+               psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
+               if (psHN != NULL)
+               {
+                       ui32ID++;
+                       if (ui32ID == 0)//skip the value 0
+                       {
+                               ui32ID = 1;
+                       }
+                       if (ui32ID == psHT->ui32CurrentID)
+                       {
+                               return GED_ERROR_FAIL;
+                       }
+               }
+               else
+               {
+                       break;
+               }
+       }
+
+       psHN = (GED_HASHNODE*)ged_alloc(sizeof(GED_HASHNODE));
+       if (psHN)
+       {
+               psHN->pvoid = pvoid;
+               psHN->ui32ID = ui32ID;
+               psHT->ui32CurrentID = ui32ID;
+               *pui32ID = ui32ID;
+               hlist_add_head_rcu(&psHN->sNode, &psHT->psHashTable[ui32Hash]);
+               psHT->ui32Count += 1;
+               return GED_OK;
+       }
+
+       return GED_ERROR_OOM;
 }
 
 void ged_hashtable_remove(GED_HASHTABLE_HANDLE hHashTable, unsigned int ui32ID)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    if (psHT)
-    {
-        unsigned int ui32Hash = ged_hash(psHT, ui32ID);
-        GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
-        if (psHN)
-        {
-            hlist_del_rcu(&psHN->sNode);
-            synchronize_rcu();
-            ged_free(psHN, sizeof(GED_HASHNODE));
-            psHT->ui32Count -= 1;
-        }
-    }
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       if (psHT)
+       {
+               unsigned int ui32Hash = ged_hash(psHT, ui32ID);
+               GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
+               if (psHN)
+               {
+                       hlist_del_rcu(&psHN->sNode);
+                       synchronize_rcu();
+                       ged_free(psHN, sizeof(GED_HASHNODE));
+                       psHT->ui32Count -= 1;
+               }
+       }
 }
 
 void* ged_hashtable_find(GED_HASHTABLE_HANDLE hHashTable, unsigned int ui32ID)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    if (psHT)
-    {
-        unsigned int ui32Hash = ged_hash(psHT, ui32ID);
-        GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
-        if (psHN)
-        {
-            return psHN->pvoid;
-        }
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       if (psHT)
+       {
+               unsigned int ui32Hash = ged_hash(psHT, ui32ID);
+               GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
+               if (psHN)
+               {
+                       return psHN->pvoid;
+               }
 #ifdef GED_DEBUG
-        if (ui32ID != 0)
-        {
-            GED_LOGE("ged_hashtable_find: ui32ID=%u ui32Hash=%u psHN=%p\n", ui32ID, ui32Hash, psHN);
-        }
+               if (ui32ID != 0)
+               {
+                       GED_LOGE("ged_hashtable_find: ui32ID=%u ui32Hash=%u psHN=%p\n", ui32ID, ui32Hash, psHN);
+               }
 #endif
-    }
-    return NULL;
+       }
+       return NULL;
 }
 
 GED_ERROR ged_hashtable_set(GED_HASHTABLE_HANDLE hHashTable, unsigned int ui32ID, void* pvoid)
 {
-    GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
-    if (psHT)
-    {
-        unsigned int ui32Hash = ged_hash(psHT, ui32ID);
-        GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
-        if (psHN)
-        {
-            psHN->pvoid = pvoid;
-            return GED_OK;
-        }
-    }
-
-    return GED_ERROR_INVALID_PARAMS;
+       GED_HASHTABLE* psHT = (GED_HASHTABLE*)hHashTable;
+       if (psHT)
+       {
+               unsigned int ui32Hash = ged_hash(psHT, ui32ID);
+               GED_HASHNODE* psHN = __ged_hashtable_find(&psHT->psHashTable[ui32Hash], ui32ID);
+               if (psHN)
+               {
+                       psHN->pvoid = pvoid;
+                       return GED_OK;
+               }
+       }
+
+       return GED_ERROR_INVALID_PARAMS;
 }
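
The table above is a handle/ID indirection: ged_hashtable_insert() hands back a generated non-zero ID (starting near GED_HASHTABLE_INIT_ID) and the other calls take that ID. A short lifecycle sketch using only the functions defined in this file; the payload struct is illustrative:

/* Sketch: create, insert, look up, remove, destroy. */
struct my_payload { int value; };

static void hashtable_demo(void)
{
        unsigned int id;
        struct my_payload item = { .value = 42 };
        GED_HASHTABLE_HANDLE h = ged_hashtable_create(5); /* 2^5 = 32 buckets */

        if (!h)
                return;

        if (GED_OK == ged_hashtable_insert(h, &item, &id))
        {
                struct my_payload *p = ged_hashtable_find(h, id); /* == &item */
                if (p)
                        ged_hashtable_remove(h, id);
        }

        ged_hashtable_destroy(h);
}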
 
index ffeabc8b32de6cc54a5364444979f951723dce53..83541811a7b0c3a2907961bce837e462536bd0ea 100644 (file)
@@ -3,10 +3,12 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/genalloc.h>
+#include <linux/sched.h>
 #include <linux/mutex.h>
-#include <linux/xlog.h>
+//#include <linux/xlog.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/rtc.h>
 
 #include "ged_base.h"
 #include "ged_log.h"
 #include "ged_profile_dvfs.h"
 #include "ged_hashtable.h"
 
+enum
+{
+       /* 0x00 - 0xff is reserved for internal buffer attributes */
+
+       /* overwrite the oldest log when the buffer is full */
+       GED_LOG_ATTR_RINGBUFFER     = 0x1,
+       /* stop logging when the buffer is full */
+       GED_LOG_ATTR_QUEUEBUFFER    = 0x2,
+       /* enlarge the buffer when it is full */
+       GED_LOG_ATTR_AUTO_INCREASE  = 0x4,
+};
+
+typedef struct GED_LOG_BUF_LINE_TAG
+{
+       int         offset;
+       int         tattrs;
+       long long   time;
+       long int    time_usec;
+       int         pid;
+       int         tid;
+} GED_LOG_BUF_LINE;
+
 typedef struct GED_LOG_BUF_TAG
 {
-    GED_LOG_BUF_TYPE    eType;
-       char                *pcBuffer;
+       GED_LOG_BUF_TYPE    eType;
+       int                 attrs;
+
+       void                *pMemory;
+       int                 i32MemorySize;
 
-       int                 i32LineCountOrg;
+       GED_LOG_BUF_LINE    *psLine;
+       char                *pcBuffer;
        int                 i32LineCount;
-       int                 i32LineBufferSize;
+       int                 i32BufferSize;
        int                 i32LineCurrent;
-    int                 i32LineValidCount;
+       int                 i32BufferCurrent;
 
        spinlock_t          sSpinLock;
        unsigned long       ui32IRQFlags;
 
-    char                acName[GED_LOG_BUF_NAME_LENGTH];
-    char                acNodeName[GED_LOG_BUF_NODE_NAME_LENGTH];
+       char                acName[GED_LOG_BUF_NAME_LENGTH];
+       char                acNodeName[GED_LOG_BUF_NODE_NAME_LENGTH];
 
-    struct dentry*      psEntry;
+       struct dentry*      psEntry;
 
-    struct list_head    sList;
+       struct list_head    sList;
 
-    unsigned int        ui32HashNodeID;
+       unsigned int        ui32HashNodeID;
 
 } GED_LOG_BUF;
 
+typedef struct GED_LOG_LISTEN_TAG
+{
+       GED_LOG_BUF_HANDLE  *pCBHnd;
+       char                acName[GED_LOG_BUF_NAME_LENGTH];
+       struct list_head    sList;
+} GED_LOG_LISTEN;
+
 typedef struct GED_LOG_BUF_LIST_TAG
 {
        rwlock_t sLock;
-       struct list_head sList;
+       struct list_head sList_buf;
+       struct list_head sList_listen;
 } GED_LOG_BUF_LIST;
 
-static GED_LOG_BUF_LIST gsGEDLogBufList;
+static GED_LOG_BUF_LIST gsGEDLogBufList = {
+       .sLock          = __RW_LOCK_UNLOCKED(gsGEDLogBufList.sLock),
+       .sList_buf      = LIST_HEAD_INIT(gsGEDLogBufList.sList_buf),
+       .sList_listen   = LIST_HEAD_INIT(gsGEDLogBufList.sList_listen), 
+};
 
 static struct dentry* gpsGEDLogEntry = NULL;
 static struct dentry* gpsGEDLogBufsDir = NULL;
@@ -59,79 +99,212 @@ static GED_HASHTABLE_HANDLE ghHashTable = NULL;
 //-----------------------------------------------------------------------------
 static GED_LOG_BUF* ged_log_buf_from_handle(GED_LOG_BUF_HANDLE hLogBuf)
 {
-    return ged_hashtable_find(ghHashTable, (unsigned int)hLogBuf);
+       return ged_hashtable_find(ghHashTable, (unsigned int)hLogBuf);
 }
 
-static int __ged_log_buf_write(GED_LOG_BUF *psGEDLogBuf, const char __user *pszBuffer, int i32Count)
+static GED_ERROR __ged_log_buf_vprint(GED_LOG_BUF *psGEDLogBuf, const char *fmt, va_list args, int attrs)
 {
-    bool bUpdate = false;
-    char *buf;
-    int cnt;
+       int buf_n;
+       int len;
 
-    if (!psGEDLogBuf)
-    {
-        return 0;
-    }
+       if (!psGEDLogBuf)
+               return GED_OK;
 
-    cnt = i32Count < psGEDLogBuf->i32LineBufferSize ? i32Count : psGEDLogBuf->i32LineBufferSize;
+       spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
 
-    spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+       /* the buffer is full; react according to its attributes */
+       if (psGEDLogBuf->i32LineCurrent >= psGEDLogBuf->i32LineCount ||
+                       psGEDLogBuf->i32BufferCurrent + 256 > psGEDLogBuf->i32BufferSize)
+       {
+               if (attrs & GED_LOG_ATTR_RINGBUFFER)
+               {
+                       /* for ring buffer, we start over. */
+                       psGEDLogBuf->i32LineCurrent = 0;
+                       psGEDLogBuf->i32BufferCurrent = 0;
+               }
+               else if (attrs & GED_LOG_ATTR_QUEUEBUFFER)
+               {
+                       if (attrs & GED_LOG_ATTR_AUTO_INCREASE)
+                       {
+                               int newLineCount, newBufferSize; 
+
+                               /* increase by min(25%, 1MB) */
+                               if ((psGEDLogBuf->i32LineCount >> 2) <= 1024 * 1024)
+                               {
+                                       newLineCount = psGEDLogBuf->i32LineCount + (psGEDLogBuf->i32LineCount >> 2);
+                                       newBufferSize = psGEDLogBuf->i32BufferSize + (psGEDLogBuf->i32BufferSize >> 2);
+                               }
+                               else
+                               {
+                                       newLineCount = psGEDLogBuf->i32LineCount + 4096;
+                                       newBufferSize = psGEDLogBuf->i32BufferSize + 1024 * 1024;
+                               }
+
+                               spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+                               if (ged_log_buf_resize(psGEDLogBuf->ui32HashNodeID, newLineCount, newBufferSize) != GED_OK)
+                               {
+                                       return GED_ERROR_OOM;
+                               }
+                               spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+                       }
+                       else
+                       {
+                               /* for queuebuffer only, we skip the log. */
+                               spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+                               return GED_ERROR_OOM;
+                       }
+               }
+       }
+
+       psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].offset = psGEDLogBuf->i32BufferCurrent;
+       psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].tattrs = 0;
+       psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].time = 0;
+
+       /* record the kernel time */
+       if (attrs & GED_LOG_ATTR_TIME)
+       {
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].tattrs = GED_LOG_ATTR_TIME;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].time = ged_get_time();
+       }
+
+       /* record the wall-clock time, pid and tid */
+       if (attrs & GED_LOG_ATTR_TIME_TPT)
+       {
+               struct timeval time;
+               unsigned long local_time;
+
+               do_gettimeofday(&time);
+               local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
 
-    if (psGEDLogBuf->i32LineCurrent < psGEDLogBuf->i32LineCount)
-    {
-        buf = psGEDLogBuf->pcBuffer + psGEDLogBuf->i32LineCurrent * psGEDLogBuf->i32LineBufferSize;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].tattrs = GED_LOG_ATTR_TIME_TPT;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].time = local_time;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].time_usec = time.tv_usec;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].pid = current->tgid;
+               psGEDLogBuf->psLine[psGEDLogBuf->i32LineCurrent].tid = current->pid;
+       }
 
-        ged_copy_from_user(buf, pszBuffer, cnt);
+       buf_n = psGEDLogBuf->i32BufferSize - psGEDLogBuf->i32BufferCurrent;
+       len = vsnprintf(psGEDLogBuf->pcBuffer + psGEDLogBuf->i32BufferCurrent, buf_n, fmt, args);
 
-        buf[cnt - 1] = '\0';
+       if (len >= buf_n) len = buf_n - 1; /* vsnprintf truncated the line */
 
-        psGEDLogBuf->i32LineCurrent ++;
-        if (GED_LOG_BUF_TYPE_RINGBUFFER == psGEDLogBuf->eType)
-        {
-            if (psGEDLogBuf->i32LineCurrent >= psGEDLogBuf->i32LineCount)
-            {
-                psGEDLogBuf->i32LineCurrent = 0;
-            }
-        }
+       buf_n -= len;
 
-        if (psGEDLogBuf->i32LineValidCount < psGEDLogBuf->i32LineCount)
-        {
-            psGEDLogBuf->i32LineValidCount ++;
-        }
+       if (attrs & GED_LOG_ATTR_RINGBUFFER)
+       {
+               int i;
+               int check = 10 + 1; /* we check the following 10 items. */ 
+               int a = psGEDLogBuf->i32BufferCurrent;
+               int b = psGEDLogBuf->i32BufferCurrent + len + 2;
+
+               for (i = psGEDLogBuf->i32LineCurrent+1; --check && i < psGEDLogBuf->i32LineCount; ++i)
+               {
+                       int pos = psGEDLogBuf->psLine[i].offset;
+                       if (pos >= a && pos < b)
+                               psGEDLogBuf->psLine[i].offset = -1;
+               }
+
+               if (check && i == psGEDLogBuf->i32LineCount)
+               {
+                       for (i = 0; --check && i < psGEDLogBuf->i32LineCurrent; ++i)
+                       {
+                               int pos = psGEDLogBuf->psLine[i].offset;
+
+                               if (pos >= a && pos < b)
+                                       psGEDLogBuf->psLine[i].offset = -1;
+                       }
+               }
+       }
 
-        bUpdate = true;
-    }
+       /* update current */
+       psGEDLogBuf->i32BufferCurrent += len + 2;
+       psGEDLogBuf->i32LineCurrent += 1;
 
-    spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+       spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
 
-    if ((psGEDLogBuf->i32LineValidCount == psGEDLogBuf->i32LineCount) &&
-        (psGEDLogBuf->eType == GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE))
-    {
-        ged_log_buf_resize(psGEDLogBuf->ui32HashNodeID, psGEDLogBuf->i32LineCount + psGEDLogBuf->i32LineCountOrg);
-    }
+       return GED_OK;
+}
+
+static GED_ERROR __ged_log_buf_print(GED_LOG_BUF *psGEDLogBuf, const char *fmt, ...)
+{
+       va_list args;
+       GED_ERROR err;
+
+       va_start(args, fmt);
+       err = __ged_log_buf_vprint(psGEDLogBuf, fmt, args, psGEDLogBuf->attrs | GED_LOG_ATTR_TIME);
+       va_end(args);
+
+       return err;
+}
+
+static int __ged_log_buf_write(GED_LOG_BUF *psGEDLogBuf, const char __user *pszBuffer, int i32Count)
+{
+       int cnt;
+       char buf[256];
+
+       if (!psGEDLogBuf)
+       {
+               return 0;
+       }
+
+       cnt = (i32Count >= 256) ? 255 : i32Count;
+
+       ged_copy_from_user(buf, pszBuffer, cnt);
+
+       buf[cnt] = 0;
+       if (cnt > 0 && buf[cnt-1] == '\n')
+       {
+               buf[cnt-1] = 0;
+       }
+
+       __ged_log_buf_print(psGEDLogBuf, buf);
+
+       return cnt;
+}
+
+static void __ged_log_buf_check_get_early_list(GED_LOG_BUF_HANDLE hLogBuf, const char *pszName)
+{
+       struct list_head *psListEntry, *psListEntryTemp, *psList;
+       GED_LOG_LISTEN *psFound = NULL, *psLogListen;
+
+       read_lock_bh(&gsGEDLogBufList.sLock);
+
+       psList = &gsGEDLogBufList.sList_listen;
+       list_for_each_safe(psListEntry, psListEntryTemp, psList)
+       {
+               psLogListen = list_entry(psListEntry, GED_LOG_LISTEN, sList);
+               if (0 == strcmp(psLogListen->acName, pszName))
+               {
+                       psFound = psLogListen;
+                       break;
+               }
+       }
 
-    if (false == bUpdate)
-    {
-        GED_LOGE("gedlog: not update in ged_log_buf_write()!\n");
-    }
+       read_unlock_bh(&gsGEDLogBufList.sLock);
 
-    return cnt;
+       if (psFound)
+       {
+               write_lock_bh(&gsGEDLogBufList.sLock);
+               *psFound->pCBHnd = hLogBuf;
+               list_del(&psFound->sList);
+               write_unlock_bh(&gsGEDLogBufList.sLock);
+       }
 }
 
 static ssize_t ged_log_buf_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
 {
-    return (ssize_t)__ged_log_buf_write(pvData, pszBuffer, (int)uiCount);
+       return (ssize_t)__ged_log_buf_write((GED_LOG_BUF *)pvData, pszBuffer, (int)uiCount);
 }
 //-----------------------------------------------------------------------------
 static void* ged_log_buf_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
 {
-    GED_LOG_BUF *psGEDLogBuf = (GED_LOG_BUF *)psSeqFile->private;
+       GED_LOG_BUF *psGEDLogBuf = (GED_LOG_BUF *)psSeqFile->private;
 
-    if (0 == *puiPosition)
-    {
-        return psGEDLogBuf;
-    }
-    return NULL;
+       if (0 == *puiPosition)
+       {
+               return psGEDLogBuf;
+       }
+       return NULL;
 }
 //-----------------------------------------------------------------------------
 static void ged_log_buf_seq_stop(struct seq_file *psSeqFile, void *pvData)
@@ -141,73 +314,93 @@ static void ged_log_buf_seq_stop(struct seq_file *psSeqFile, void *pvData)
 //-----------------------------------------------------------------------------
 static void* ged_log_buf_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
 {
-    (*puiPosition)++;
+       (*puiPosition)++;
 
-    return NULL;
+       return NULL;
 }
 //-----------------------------------------------------------------------------
+static int ged_log_buf_seq_show_print(struct seq_file *psSeqFile, GED_LOG_BUF *psGEDLogBuf, int i)
+{
+       int err = 0;
+       GED_LOG_BUF_LINE *line;
+
+       line = &psGEDLogBuf->psLine[i];
+
+       if (line->offset >= 0)
+       {
+               if (line->tattrs & GED_LOG_ATTR_TIME)
+               {
+                       unsigned long long t;
+                       unsigned long nanosec_rem;
+
+                       t = line->time;                             
+                       nanosec_rem = do_div(t, 1000000000);
+
+                       seq_printf(psSeqFile,"[%5llu.%06lu] ", t, nanosec_rem / 1000);
+               }
+
+               if (line->tattrs & GED_LOG_ATTR_TIME_TPT)
+               {
+                       unsigned long local_time;
+                       struct rtc_time tm;
+
+                       local_time = line->time; 
+                       rtc_time_to_tm(local_time, &tm);
+
+                       seq_printf(psSeqFile,"%02d-%02d %02d:%02d:%02d.%06lu %5d %5d ", 
+                                       /*tm.tm_year + 1900,*/ tm.tm_mon + 1, tm.tm_mday, 
+                                       tm.tm_hour, tm.tm_min, tm.tm_sec, 
+                                       line->time_usec, line->pid, line->tid);
+               }
+
+               err = seq_printf(psSeqFile, "%s\n", psGEDLogBuf->pcBuffer + line->offset);
+       }
+
+       return err;
+}
+
 static int ged_log_buf_seq_show(struct seq_file *psSeqFile, void *pvData)
 {
-    GED_LOG_BUF *psGEDLogBuf = (GED_LOG_BUF *)pvData;
+       GED_LOG_BUF *psGEDLogBuf = (GED_LOG_BUF *)pvData;
 
        if (psGEDLogBuf != NULL)
        {
-        char *buf;
-        int i, i32Count;
-
-        spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-
-        if (psGEDLogBuf->acName[0] != '\0')
-        {
-            seq_printf(psSeqFile, "---------- %s ----------\n", psGEDLogBuf->acName);
-        }
-
-        if (GED_LOG_BUF_TYPE_RINGBUFFER == psGEDLogBuf->eType)
-        {
-            i32Count = psGEDLogBuf->i32LineValidCount;
-            i = psGEDLogBuf->i32LineCurrent - i32Count;
-            if (i < 0)
-            {
-                i += psGEDLogBuf->i32LineCount;
-                buf = psGEDLogBuf->pcBuffer + i * psGEDLogBuf->i32LineBufferSize;
-                while ((i32Count > 0) && (i < psGEDLogBuf->i32LineCount))
-                {
-                    if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                    {
-                        break;
-                    }
-                    buf += psGEDLogBuf->i32LineBufferSize;
-                    i32Count --;
-                    i ++;
-                }
-            }
-            buf = psGEDLogBuf->pcBuffer;
-            while (i32Count > 0)
-            {
-                if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                {
-                    break;
-                }
-                buf += psGEDLogBuf->i32LineBufferSize;
-                i32Count --;
-            }
-        }
-        else if ((GED_LOG_BUF_TYPE_QUEUEBUFFER == psGEDLogBuf->eType) || 
-                 (GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE == psGEDLogBuf->eType))
-        {
-            i32Count = psGEDLogBuf->i32LineValidCount;
-            buf = psGEDLogBuf->pcBuffer;
-            for (i = 0; i < i32Count; ++i)
-            {
-                if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                {
-                    break;
-                }
-                buf += psGEDLogBuf->i32LineBufferSize;
-            }
-        }
-
-        spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+               int i;
+
+               spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+
+               if (psGEDLogBuf->acName[0] != '\0')
+               {
+                       seq_printf(psSeqFile, "---------- %s (%d/%d) ----------\n",
+                                       psGEDLogBuf->acName, psGEDLogBuf->i32BufferCurrent, psGEDLogBuf->i32BufferSize);
+               }
+
+               if (psGEDLogBuf->attrs & GED_LOG_ATTR_RINGBUFFER)
+               {
+                       for (i = psGEDLogBuf->i32LineCurrent; i < psGEDLogBuf->i32LineCount; ++i)
+                       {
+                               if (0 != ged_log_buf_seq_show_print(psSeqFile, psGEDLogBuf, i))
+                                       break;
+                       }
+
+                       //seq_printf(psSeqFile, " > ---------- start over ----------\n");
+
+                       for (i = 0; i < psGEDLogBuf->i32LineCurrent; ++i)
+                       {
+                               if (0 != ged_log_buf_seq_show_print(psSeqFile, psGEDLogBuf, i))
+                                       break;
+                       }
+               }
+               else if (psGEDLogBuf->attrs & GED_LOG_ATTR_QUEUEBUFFER)
+               {
+                       for (i = 0; i < psGEDLogBuf->i32LineCount; ++i)
+                       {
+                               if (0 != ged_log_buf_seq_show_print(psSeqFile, psGEDLogBuf, i))
+                                       break;
+                       }
+               }
+
+               spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
        }
 
        return 0;
@@ -222,294 +415,389 @@ static struct seq_operations gsGEDLogBufReadOps =
 };
 //-----------------------------------------------------------------------------
 GED_LOG_BUF_HANDLE ged_log_buf_alloc(
-    int i32LineCount, 
-    int i32LineBufferSize, 
-    GED_LOG_BUF_TYPE eType, 
-    const char* pszName,
-    const char* pszNodeName)
-{
-    void *pvBuf;
-    GED_LOG_BUF *psGEDLogBuf;
-    GED_ERROR error;
-
-    int i32BufSize = i32LineCount * i32LineBufferSize;
-
-    if (((!pszName) && (!pszNodeName)) || (i32LineCount <= 0) || (i32LineBufferSize <= 0))
-    {
-        return (GED_LOG_BUF_HANDLE)0;
-    }
-
-    psGEDLogBuf = (GED_LOG_BUF*)ged_alloc(sizeof(GED_LOG_BUF));
-    if (NULL == psGEDLogBuf)
-    {
-        GED_LOGE("ged: failed to allocate log buf!\n");
-        return (GED_LOG_BUF_HANDLE)0;
-    }
-
-    pvBuf = ged_alloc(i32BufSize);
-    if (NULL == pvBuf)
-    {
-        ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
-        GED_LOGE("ged: failed to allocate log buf!\n");
-        return (GED_LOG_BUF_HANDLE)0;
-    }
-
-    psGEDLogBuf->eType = eType;
-    psGEDLogBuf->pcBuffer = pvBuf;
-    psGEDLogBuf->i32LineCountOrg = i32LineCount;
-    psGEDLogBuf->i32LineCount = i32LineCount;
-    psGEDLogBuf->i32LineBufferSize = i32LineBufferSize;
-    psGEDLogBuf->i32LineCurrent = 0;
-    psGEDLogBuf->i32LineValidCount = 0;
-    psGEDLogBuf->psEntry = NULL;
-    spin_lock_init(&psGEDLogBuf->sSpinLock);
-    psGEDLogBuf->acName[0] = '\0';
-    psGEDLogBuf->acNodeName[0] = '\0';
-
-    if (pszName)
-    {
-        snprintf(psGEDLogBuf->acName, GED_LOG_BUF_NAME_LENGTH, "%s", pszName);
-    }
-
-    // Add into the global list
-    INIT_LIST_HEAD(&psGEDLogBuf->sList);
-    write_lock_bh(&gsGEDLogBufList.sLock);
-    list_add(&psGEDLogBuf->sList, &gsGEDLogBufList.sList);
-    write_unlock_bh(&gsGEDLogBufList.sLock);
-
-    if (pszNodeName)
-    {
-        int err;
-        snprintf(psGEDLogBuf->acNodeName, GED_LOG_BUF_NODE_NAME_LENGTH, "%s", pszNodeName);
-        err = ged_debugFS_create_entry(
-                psGEDLogBuf->acNodeName,
-                gpsGEDLogBufsDir,
-                &gsGEDLogBufReadOps,
-                ged_log_buf_write_entry,
-                psGEDLogBuf,
-                &psGEDLogBuf->psEntry);
-
-        if (unlikely(err)) 
-        {
-            GED_LOGE("ged: failed to create %s entry, err(%d)!\n", pszNodeName, err);
-            ged_log_buf_free(psGEDLogBuf->ui32HashNodeID);
-            return (GED_LOG_BUF_HANDLE)0;
-        }
-    }
-
-    error = ged_hashtable_insert(ghHashTable, psGEDLogBuf, &psGEDLogBuf->ui32HashNodeID);
-    if (GED_OK != error)
-    {
-        GED_LOGE("ged: failed to insert into a hash table, err(%d)!\n", error);
-        ged_log_buf_free(psGEDLogBuf->ui32HashNodeID);
-        return (GED_LOG_BUF_HANDLE)0;
-    }
-
-    GED_LOGI("ged_log_buf_alloc OK\n");
-
-    return (GED_LOG_BUF_HANDLE)psGEDLogBuf->ui32HashNodeID;
+               int i32MaxLineCount,
+               int i32MaxBufferSizeByte,
+               GED_LOG_BUF_TYPE eType, 
+               const char* pszName,
+               const char* pszNodeName)
+{
+       GED_LOG_BUF *psGEDLogBuf;
+       GED_ERROR error;
+
+       if (((!pszName) && (!pszNodeName)) || (i32MaxLineCount <= 0) || (i32MaxBufferSizeByte <= 0))
+       {
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       psGEDLogBuf = (GED_LOG_BUF*)ged_alloc(sizeof(GED_LOG_BUF));
+       if (NULL == psGEDLogBuf)
+       {
+               GED_LOGE("ged: failed to allocate log buf!\n");
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       psGEDLogBuf->eType = eType;
+
+       switch (eType)
+       {
+               case GED_LOG_BUF_TYPE_RINGBUFFER:
+                       psGEDLogBuf->attrs = GED_LOG_ATTR_RINGBUFFER;
+                       break;
+               case GED_LOG_BUF_TYPE_QUEUEBUFFER:
+                       psGEDLogBuf->attrs = GED_LOG_ATTR_QUEUEBUFFER;
+                       break;
+               case GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE:
+                       psGEDLogBuf->attrs = GED_LOG_ATTR_QUEUEBUFFER | GED_LOG_ATTR_AUTO_INCREASE;
+                       break;
+               default:
+                       /* don't leave attrs uninitialized on an unknown type */
+                       psGEDLogBuf->attrs = GED_LOG_ATTR_QUEUEBUFFER;
+                       break;
+       }
+
+       psGEDLogBuf->i32MemorySize = i32MaxBufferSizeByte + sizeof(GED_LOG_BUF_LINE) * i32MaxLineCount;
+       psGEDLogBuf->pMemory = ged_alloc(psGEDLogBuf->i32MemorySize);
+       if (NULL == psGEDLogBuf->pMemory)
+       {
+               ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
+               GED_LOGE("ged: failed to allocate log buf!\n");
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       psGEDLogBuf->psLine = (GED_LOG_BUF_LINE *)psGEDLogBuf->pMemory;
+       psGEDLogBuf->pcBuffer = (char *)&psGEDLogBuf->psLine[i32MaxLineCount];
+       psGEDLogBuf->i32LineCount = i32MaxLineCount;
+       psGEDLogBuf->i32BufferSize = i32MaxBufferSizeByte;
+       psGEDLogBuf->i32LineCurrent = 0;
+       psGEDLogBuf->i32BufferCurrent = 0;
+
+       psGEDLogBuf->psEntry = NULL;
+       spin_lock_init(&psGEDLogBuf->sSpinLock);
+       psGEDLogBuf->acName[0] = '\0';
+       psGEDLogBuf->acNodeName[0] = '\0';
+
+       /* Init Line */
+       {
+               int i = 0;
+               for (i = 0; i < psGEDLogBuf->i32LineCount; ++i)
+                       psGEDLogBuf->psLine[i].offset = -1;
+       }
+
+       if (pszName)
+       {
+               snprintf(psGEDLogBuf->acName, GED_LOG_BUF_NAME_LENGTH, "%s", pszName);
+       }
+
+       // Add into the global list
+       INIT_LIST_HEAD(&psGEDLogBuf->sList);
+       write_lock_bh(&gsGEDLogBufList.sLock);
+       list_add(&psGEDLogBuf->sList, &gsGEDLogBufList.sList_buf);
+       write_unlock_bh(&gsGEDLogBufList.sLock);
+
+       if (pszNodeName)
+       {
+               int err;
+               snprintf(psGEDLogBuf->acNodeName, GED_LOG_BUF_NODE_NAME_LENGTH, "%s", pszNodeName);
+               err = ged_debugFS_create_entry(
+                               psGEDLogBuf->acNodeName,
+                               gpsGEDLogBufsDir,
+                               &gsGEDLogBufReadOps,
+                               ged_log_buf_write_entry,
+                               psGEDLogBuf,
+                               &psGEDLogBuf->psEntry);
+
+               if (unlikely(err))
+               {
+                       GED_LOGE("ged: failed to create %s entry, err(%d)!\n", pszNodeName, err);
+                       /* not yet in the hash table, so ged_log_buf_free() cannot
+                        * look this buffer up; unlink and free it directly */
+                       write_lock_bh(&gsGEDLogBufList.sLock);
+                       list_del(&psGEDLogBuf->sList);
+                       write_unlock_bh(&gsGEDLogBufList.sLock);
+                       ged_free(psGEDLogBuf->pMemory, psGEDLogBuf->i32MemorySize);
+                       ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
+                       return (GED_LOG_BUF_HANDLE)0;
+               }
+       }
+
+       error = ged_hashtable_insert(ghHashTable, psGEDLogBuf, &psGEDLogBuf->ui32HashNodeID);
+       if (GED_OK != error)
+       {
+               GED_LOGE("ged: failed to insert into a hash table, err(%d)!\n", error);
+               /* the insert failed, so no valid hash node ID exists;
+                * tear the buffer down directly */
+               if (psGEDLogBuf->psEntry)
+               {
+                       ged_debugFS_remove_entry(psGEDLogBuf->psEntry);
+               }
+               write_lock_bh(&gsGEDLogBufList.sLock);
+               list_del(&psGEDLogBuf->sList);
+               write_unlock_bh(&gsGEDLogBufList.sLock);
+               ged_free(psGEDLogBuf->pMemory, psGEDLogBuf->i32MemorySize);
+               ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       GED_LOGI("ged_log_buf_alloc OK\n");
+
+       if (pszName)
+       {
+               __ged_log_buf_check_get_early_list(psGEDLogBuf->ui32HashNodeID, pszName);
+       }
+
+       return (GED_LOG_BUF_HANDLE)psGEDLogBuf->ui32HashNodeID;
 }
 
 GED_ERROR ged_log_buf_resize(
-    GED_LOG_BUF_HANDLE hLogBuf,
-    int i32NewLineCount)
-{
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    int i32OldLineCount, i32OldBufSize, i32NewBufSize;
-    void *pvNewBuf, *pvOldBuf;
-
-    if ((NULL == psGEDLogBuf) || (i32NewLineCount <= 0))
-    {
-        return GED_ERROR_INVALID_PARAMS;
-    }
-
-    i32OldLineCount = psGEDLogBuf->i32LineCount;
-    i32OldBufSize = i32OldLineCount * psGEDLogBuf->i32LineBufferSize;
-    i32NewBufSize = i32NewLineCount * psGEDLogBuf->i32LineBufferSize;
-    pvNewBuf = ged_alloc(i32NewBufSize);
-
-    if (NULL == pvNewBuf)
-    {
-        return GED_ERROR_OOM;
-    }
-
-    spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-    pvOldBuf = (void*)psGEDLogBuf->pcBuffer;
-    memcpy(pvNewBuf, pvOldBuf, i32OldBufSize);
-    psGEDLogBuf->i32LineCount = i32NewLineCount;
-    psGEDLogBuf->pcBuffer = pvNewBuf;
-    spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-    ged_free(pvOldBuf, i32OldBufSize);
-    return GED_OK;
+               GED_LOG_BUF_HANDLE hLogBuf,
+               int i32NewMaxLineCount,
+               int i32NewMaxBufferSizeByte)
+{
+       int i;
+       GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+       int i32NewMemorySize, i32OldMemorySize;
+       void *pNewMemory, *pOldMemory;
+       GED_LOG_BUF_LINE *pi32NewLine;
+       char *pcNewBuffer;
+
+       if ((NULL == psGEDLogBuf) || (i32NewMaxLineCount <= 0) || (i32NewMaxBufferSizeByte <= 0))
+       {
+               return GED_ERROR_INVALID_PARAMS;
+       }
+
+       i32NewMemorySize = i32NewMaxBufferSizeByte + sizeof(GED_LOG_BUF_LINE) * i32NewMaxLineCount;
+       pNewMemory = ged_alloc(i32NewMemorySize);
+       if (NULL == pNewMemory)
+       {
+               return GED_ERROR_OOM;
+       }
+
+       spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+
+       pi32NewLine = (GED_LOG_BUF_LINE *)pNewMemory;
+       pcNewBuffer = (char *)&pi32NewLine[i32NewMaxLineCount];
+
+       memcpy(pi32NewLine, psGEDLogBuf->psLine, sizeof(GED_LOG_BUF_LINE) * min(i32NewMaxLineCount, psGEDLogBuf->i32LineCount));
+       memcpy(pcNewBuffer, psGEDLogBuf->pcBuffer, min(i32NewMaxBufferSizeByte, psGEDLogBuf->i32BufferSize));
+
+       for (i = psGEDLogBuf->i32LineCount; i < i32NewMaxLineCount; ++i)
+               pi32NewLine[i].offset = -1;
+
+       i32OldMemorySize = psGEDLogBuf->i32MemorySize;
+       pOldMemory = psGEDLogBuf->pMemory;
+
+       psGEDLogBuf->i32MemorySize = i32NewMemorySize;
+       psGEDLogBuf->pMemory = pNewMemory;
+       psGEDLogBuf->psLine = pi32NewLine;
+       psGEDLogBuf->pcBuffer = pcNewBuffer;
+       psGEDLogBuf->i32LineCount = i32NewMaxLineCount;
+       psGEDLogBuf->i32BufferSize = i32NewMaxBufferSizeByte;
+
+       if (psGEDLogBuf->i32BufferCurrent >= i32NewMaxBufferSizeByte)
+               psGEDLogBuf->i32BufferCurrent = i32NewMaxBufferSizeByte - 1;
+       pcNewBuffer[psGEDLogBuf->i32BufferCurrent] = 0;
+
+       spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+       ged_free(pOldMemory, i32OldMemorySize);
+
+       return GED_OK;
 }
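
For concreteness, the GED_LOG_ATTR_AUTO_INCREASE path in __ged_log_buf_vprint() grows a full queue buffer by 25% while a quarter of the line count is still at or below the 1MB threshold, and by a fixed 4096-line/1MB step afterwards (the driver compares the line count against the byte threshold; the sketch mirrors that check verbatim):

/* Sketch: the grow-by-min(25%, 1MB) arithmetic that feeds
 * ged_log_buf_resize() from the auto-increase path. */
#include <stdio.h>

static void grow(int *line_count, int *buffer_size)
{
        if ((*line_count >> 2) <= 1024 * 1024) {
                *line_count += *line_count >> 2;   /* +25% lines */
                *buffer_size += *buffer_size >> 2; /* +25% bytes */
        } else {
                *line_count += 4096;               /* fixed line step */
                *buffer_size += 1024 * 1024;       /* +1MB */
        }
}

int main(void)
{
        int lines = 4096, bytes = 1024 * 1024;     /* example start size */

        grow(&lines, &bytes);
        printf("%d lines, %d bytes\n", lines, bytes); /* 5120, 1310720 */
        return 0;
}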
 
-GED_ERROR ged_log_buf_ignore_lines(GED_LOG_BUF_HANDLE hLogBuf, int i32LineCount)
-{
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    if (psGEDLogBuf)
-    {
-        int i32NewLineValidCount;
-
-        spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-
-        i32NewLineValidCount = psGEDLogBuf->i32LineValidCount - i32LineCount;
-        if (i32NewLineValidCount < 0)
-        {
-            i32NewLineValidCount = 0;
-        }
-
-        if ((GED_LOG_BUF_TYPE_QUEUEBUFFER == psGEDLogBuf->eType) ||
-            (GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE == psGEDLogBuf->eType))
-        {
-            if (i32NewLineValidCount > 0)
-            {
-                void *pvNewBuf = (void*)psGEDLogBuf->pcBuffer;
-                void *pvOldBuf = (void*)(psGEDLogBuf->pcBuffer + 
-                    (psGEDLogBuf->i32LineCurrent - i32NewLineValidCount) * 
-                    psGEDLogBuf->i32LineBufferSize);
-                int i32NewBufSize = i32NewLineValidCount * psGEDLogBuf->i32LineBufferSize;
-                memcpy(pvNewBuf, pvOldBuf, i32NewBufSize);
-            }
-            psGEDLogBuf->i32LineCurrent = i32NewLineValidCount;
-        }
-
-        psGEDLogBuf->i32LineValidCount = i32NewLineValidCount;
-
-        spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-    }
-
-    return GED_OK;
+GED_ERROR ged_log_buf_ignore_lines(GED_LOG_BUF_HANDLE hLogBuf, int n)
+{
+       GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+
+       if (psGEDLogBuf && n > 0)
+       {
+               if (psGEDLogBuf->attrs & GED_LOG_ATTR_QUEUEBUFFER)
+               {
+                       if (n >= psGEDLogBuf->i32LineCurrent)
+                       {
+                               /* reset all buffer */
+                               ged_log_buf_reset(hLogBuf);
+                       }
+                       else
+                       {
+                               int i;
+                               int buf_offset;
+                               int buf_size;
+
+                               spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+
+                               buf_offset = psGEDLogBuf->psLine[n].offset;
+                               buf_size = psGEDLogBuf->i32BufferCurrent - buf_offset;
+
+                               /* Move lines, update offset and update current */
+                               for (i = 0; n + i < psGEDLogBuf->i32LineCount; ++i)
+                               {
+                                       psGEDLogBuf->psLine[i] = psGEDLogBuf->psLine[n + i];
+                                       psGEDLogBuf->psLine[i].offset -= buf_offset;
+                               }
+                               /* invalidate the tail entries vacated by the move */
+                               for (i = psGEDLogBuf->i32LineCount - n; i < psGEDLogBuf->i32LineCount; ++i)
+                                       psGEDLogBuf->psLine[i].offset = -1;
+                               psGEDLogBuf->i32LineCurrent -= n;
+
+                               /* Move buffers and update current */
+                               for (i = 0; i < buf_size; ++i)
+                                       psGEDLogBuf->pcBuffer[i] = psGEDLogBuf->pcBuffer[buf_offset + i];
+                               psGEDLogBuf->i32BufferCurrent = buf_size;
+
+                               spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+                       }
+               }
+       }
+
+       return GED_OK;
 }
 
 GED_LOG_BUF_HANDLE ged_log_buf_get(const char* pszName)
 {
-    struct list_head *psListEntry, *psListEntryTemp, *psList;
-    GED_LOG_BUF* psFound = NULL, *psLogBuf;
+       struct list_head *psListEntry, *psListEntryTemp, *psList;
+       GED_LOG_BUF *psFound = NULL, *psLogBuf;
 
-    if (!pszName)
-    {
-        return (GED_LOG_BUF_HANDLE)0;
-    }
+       if (!pszName)
+       {
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       read_lock_bh(&gsGEDLogBufList.sLock);
+
+       psList = &gsGEDLogBufList.sList_buf;
+       list_for_each_safe(psListEntry, psListEntryTemp, psList)
+       {
+               psLogBuf = list_entry(psListEntry, GED_LOG_BUF, sList);
+               if (0 == strcmp(psLogBuf->acName, pszName))
+               {
+                       psFound = psLogBuf;
+                       break;
+               }
+       }
+
+       read_unlock_bh(&gsGEDLogBufList.sLock);
+
+       if (!psFound)
+       {
+               return (GED_LOG_BUF_HANDLE)0;
+       }
+
+       return (GED_LOG_BUF_HANDLE)psFound->ui32HashNodeID;
+}
+
+int ged_log_buf_get_early(const char* pszName, GED_LOG_BUF_HANDLE *callback_set_handle)
+{
+       int err = 0;
+
+       if (NULL == pszName)
+       {
+               return GED_ERROR_INVALID_PARAMS;
+       }
 
-    read_lock_bh(&gsGEDLogBufList.sLock);
+       *callback_set_handle = ged_log_buf_get(pszName);
 
-    psList = &gsGEDLogBufList.sList;
-    list_for_each_safe(psListEntry, psListEntryTemp, psList)
-    {
-        psLogBuf = list_entry(psListEntry, GED_LOG_BUF, sList);
-        if (0 == strcmp(psLogBuf->acName, pszName))
-        {
-            psFound = psLogBuf;
-            break;
-        }
-    }
+       if (0 == *callback_set_handle)
+       {
+               GED_LOG_LISTEN *psGEDLogListen;
 
-    read_unlock_bh(&gsGEDLogBufList.sLock);
+               write_lock_bh(&gsGEDLogBufList.sLock);
 
-    if (!psFound)
-    {
-        return (GED_LOG_BUF_HANDLE)0;
-    }
+               /* search again */
+               {
+                       struct list_head *psListEntry, *psListEntryTemp, *psList;
+                       GED_LOG_BUF *psFound = NULL, *psLogBuf;
+
+                       psList = &gsGEDLogBufList.sList_buf;
+                       list_for_each_safe(psListEntry, psListEntryTemp, psList)
+                       {
+                               psLogBuf = list_entry(psListEntry, GED_LOG_BUF, sList);
+                               if (0 == strcmp(psLogBuf->acName, pszName))
+                               {
+                                       psFound = psLogBuf;
+                                       break;
+                               }
+                       }
+
+                       if (psFound)
+                       {
+                               *callback_set_handle = (GED_LOG_BUF_HANDLE)psFound->ui32HashNodeID;
+                               goto exit_unlock;
+                       }
+               }
+
+               /* add to listen list */
+               psGEDLogListen = (GED_LOG_LISTEN*)ged_alloc(sizeof(GED_LOG_LISTEN));
+               if (NULL == psGEDLogListen)
+               {
+                       err = GED_ERROR_OOM;
+                       goto exit_unlock;
+               }
+               psGEDLogListen->pCBHnd = callback_set_handle;
+               snprintf(psGEDLogListen->acName, GED_LOG_BUF_NAME_LENGTH, "%s", pszName);
+               INIT_LIST_HEAD(&psGEDLogListen->sList);
+               list_add(&psGEDLogListen->sList, &gsGEDLogBufList.sList_listen);
+
+exit_unlock:
+               write_unlock_bh(&gsGEDLogBufList.sLock);
+       }
 
-    return (GED_LOG_BUF_HANDLE)psFound->ui32HashNodeID;
+       return err;
 }
+
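A usage sketch for the early-handle mechanism above: if the named buffer does not exist yet, the caller's handle pointer is parked on sList_listen and written later by __ged_log_buf_check_get_early_list() when ged_log_buf_alloc() creates a buffer with a matching name. The buffer name below is illustrative:

/* Sketch: obtain a log buffer handle before the buffer exists. */
static GED_LOG_BUF_HANDLE ghMyLog = 0;

static void my_client_init(void)
{
        /* sets ghMyLog now if "my_buffer" exists, or later from
         * ged_log_buf_alloc() once it is created */
        ged_log_buf_get_early("my_buffer", &ghMyLog);
}
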
 //-----------------------------------------------------------------------------
 void ged_log_buf_free(GED_LOG_BUF_HANDLE hLogBuf)
 {
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    if (psGEDLogBuf)
-    {
-        int i32BufSize = psGEDLogBuf->i32LineCount * psGEDLogBuf->i32LineBufferSize;
-
-        ged_hashtable_remove(ghHashTable, psGEDLogBuf->ui32HashNodeID);
+       GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+       if (psGEDLogBuf)
+       {
+               ged_hashtable_remove(ghHashTable, psGEDLogBuf->ui32HashNodeID);
 
                write_lock_bh(&gsGEDLogBufList.sLock);
                list_del(&psGEDLogBuf->sList);
-        write_unlock_bh(&gsGEDLogBufList.sLock);
+               write_unlock_bh(&gsGEDLogBufList.sLock);
 
-        if (psGEDLogBuf->psEntry)
-        {
-            ged_debugFS_remove_entry(psGEDLogBuf->psEntry);
-        }
+               if (psGEDLogBuf->psEntry)
+               {
+                       ged_debugFS_remove_entry(psGEDLogBuf->psEntry);
+               }
 
-        ged_free(psGEDLogBuf->pcBuffer, i32BufSize);
-        ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
+               ged_free(psGEDLogBuf->pMemory, psGEDLogBuf->i32MemorySize);
+               ged_free(psGEDLogBuf, sizeof(GED_LOG_BUF));
 
-        GED_LOGI("ged_log_buf_free OK\n");
-    }
+               GED_LOGI("ged_log_buf_free OK\n");
+       }
 }
 //-----------------------------------------------------------------------------
 GED_ERROR ged_log_buf_print(GED_LOG_BUF_HANDLE hLogBuf, const char *fmt, ...)
 {
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    if (psGEDLogBuf)
-    {
-        GED_BOOL bUpdate = GED_FALSE;
-        va_list args;
-        char *buf;
-        
-        spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-
-        if (psGEDLogBuf->i32LineCurrent < psGEDLogBuf->i32LineCount)
-        {
-            buf = psGEDLogBuf->pcBuffer + psGEDLogBuf->i32LineCurrent * psGEDLogBuf->i32LineBufferSize;
-            va_start(args, fmt);
-            vsnprintf(buf, psGEDLogBuf->i32LineBufferSize - 1, fmt, args);
-            va_end(args);
-            buf[psGEDLogBuf->i32LineBufferSize - 1] = '\0';
-
-            psGEDLogBuf->i32LineCurrent ++;
-            if (GED_LOG_BUF_TYPE_RINGBUFFER == psGEDLogBuf->eType)
-            {
-                if (psGEDLogBuf->i32LineCurrent >= psGEDLogBuf->i32LineCount)
-                {
-                    psGEDLogBuf->i32LineCurrent = 0;
-                }
-            }
-
-            if (psGEDLogBuf->i32LineValidCount < psGEDLogBuf->i32LineCount)
-            {
-                psGEDLogBuf->i32LineValidCount ++;
-            }
-
-            bUpdate = GED_TRUE;
-        }
-
-        spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-
-        if ((psGEDLogBuf->i32LineValidCount == psGEDLogBuf->i32LineCount) &&
-            (psGEDLogBuf->eType == GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE))
-        {
-            ged_log_buf_resize(psGEDLogBuf->ui32HashNodeID, psGEDLogBuf->i32LineCount + psGEDLogBuf->i32LineCountOrg);
-        }
-
-        if (GED_FALSE == bUpdate)
-        {
-            GED_LOGE("gedlog: out of buffer!\n");
-        }
-    }
-
-    return GED_OK;
+	va_list args;
+	GED_ERROR err = GED_OK;
+	GED_LOG_BUF *psGEDLogBuf;
+
+	if (hLogBuf)
+	{
+		psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+		if (psGEDLogBuf) /* guard against stale or unknown handles */
+		{
+			va_start(args, fmt);
+			err = __ged_log_buf_vprint(psGEDLogBuf, fmt, args, psGEDLogBuf->attrs);
+			va_end(args);
+		}
+	}
+
+	return err;
+}
+GED_ERROR ged_log_buf_print2(GED_LOG_BUF_HANDLE hLogBuf, int i32LogAttrs, const char *fmt, ...)
+{
+	va_list args;
+	GED_ERROR err = GED_OK;
+	GED_LOG_BUF *psGEDLogBuf;
+
+	if (hLogBuf)
+	{
+		psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+		if (psGEDLogBuf) /* guard against stale or unknown handles */
+		{
+			/* the low byte is reserved for the buffer's own attrs */
+			i32LogAttrs &= ~0xff;
+
+			va_start(args, fmt);
+			err = __ged_log_buf_vprint(psGEDLogBuf, fmt, args, psGEDLogBuf->attrs | i32LogAttrs);
+			va_end(args);
+		}
+	}
+
+	return err;
 }
 //-----------------------------------------------------------------------------
 GED_ERROR ged_log_buf_reset(GED_LOG_BUF_HANDLE hLogBuf)
 {
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    if (psGEDLogBuf)
-    {
-        if ((psGEDLogBuf->eType == GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE) &&
-            (psGEDLogBuf->i32LineCount != psGEDLogBuf->i32LineCountOrg))
-        {
-            ged_log_buf_resize(psGEDLogBuf->ui32HashNodeID, psGEDLogBuf->i32LineCountOrg);
-        }
-
-        spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-        psGEDLogBuf->i32LineCurrent = 0;
-        psGEDLogBuf->i32LineValidCount = 0;
-        spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-    }
-
-    return GED_OK;
+       GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+       if (psGEDLogBuf)
+       {
+               int i;
+               spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+
+               psGEDLogBuf->i32LineCurrent = 0;
+               psGEDLogBuf->i32BufferCurrent = 0;
+               for (i = 0; i < psGEDLogBuf->i32LineCount; ++i)
+               {
+                       psGEDLogBuf->psLine[i].offset = -1;
+               }
+
+               spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+       }
+
+       return GED_OK;
 }
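The reset path above reflects the reworked buffer layout: lines are no longer fixed-size slots but offsets into a single text area, with -1 marking an empty slot. A minimal sketch of that layout, using only the fields visible in this diff (pMemory, i32MemorySize, psLine[].offset, i32LineCurrent, i32BufferCurrent); the exact struct in ged_log.c may differ:

    /* Sketch only; field names beyond those referenced above are assumptions. */
    typedef struct GED_LOG_BUF_LINE_SKETCH {
            int offset;     /* byte offset of this line in the text area, -1 = empty */
    } GED_LOG_BUF_LINE_SKETCH;

    typedef struct GED_LOG_BUF_SKETCH {
            void *pMemory;                    /* single allocation: line table + text */
            int i32MemorySize;                /* passed to ged_free() on teardown */
            GED_LOG_BUF_LINE_SKETCH *psLine;  /* i32LineCount entries */
            char *pcBuffer;                   /* text area inside pMemory */
            int i32LineCount;
            int i32LineCurrent;               /* next line slot to fill */
            int i32BufferCurrent;             /* next free byte in the text area */
    } GED_LOG_BUF_SKETCH;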
 
 //-----------------------------------------------------------------------------
@@ -519,172 +807,108 @@ GED_ERROR ged_log_buf_reset(GED_LOG_BUF_HANDLE hLogBuf)
 //-----------------------------------------------------------------------------
 static ssize_t ged_log_write_entry(const char __user *pszBuffer, size_t uiCount, loff_t uiPosition, void *pvData)
 {
-    #define GED_LOG_CMD_SIZE 64
-    char acBuffer[GED_LOG_CMD_SIZE];
-
-    int i32Value;
-
-    if ((0 < uiCount) && (uiCount < GED_LOG_CMD_SIZE))
-    {
-        if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
-        {
-            acBuffer[uiCount - 1] = '\0';
-            if (strcmp(acBuffer, "reset") == 0)
-            {
-                struct list_head *psListEntry, *psListEntryTemp, *psList;
-                write_lock_bh(&gsGEDLogBufList.sLock);
-                psList = &gsGEDLogBufList.sList;
-                list_for_each_safe(psListEntry, psListEntryTemp, psList)
-                {
-                    GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
-                    ged_log_buf_reset(psGEDLogBuf);
-                }
-                write_unlock_bh(&gsGEDLogBufList.sLock);
-            }
-            else if (strcmp(acBuffer, "profile_dvfs_enable") == 0)
-            {
-                ged_profile_dvfs_enable();
-            }
-            else if (strcmp(acBuffer, "profile_dvfs_disable") == 0)
-            {
-                ged_profile_dvfs_disable();
-            }
-            else if (strcmp(acBuffer, "profile_dvfs_start") == 0)
-            {
-                ged_profile_dvfs_start();
-            }
-            else if (strcmp(acBuffer, "profile_dvfs_stop") == 0)
-            {
-                ged_profile_dvfs_stop();
-            }
-            else if (sscanf(acBuffer, "profile_dvfs_ignore_lines %d", &i32Value) == 1)
-            {
-                ged_profile_dvfs_ignore_lines(i32Value);
-            }
-            //else if (...) //for other commands
-            //{
-            //}
-        }
-    }
-
-    return uiCount;
+#define GED_LOG_CMD_SIZE 64
+       char acBuffer[GED_LOG_CMD_SIZE];
+
+       int i32Value;
+
+       if ((0 < uiCount) && (uiCount < GED_LOG_CMD_SIZE))
+       {
+               if (0 == ged_copy_from_user(acBuffer, pszBuffer, uiCount))
+               {
+                       acBuffer[uiCount - 1] = '\0';
+                       if (strcmp(acBuffer, "reset") == 0)
+                       {
+                               struct list_head *psListEntry, *psListEntryTemp, *psList;
+                               write_lock_bh(&gsGEDLogBufList.sLock);
+                               psList = &gsGEDLogBufList.sList_buf;
+                               list_for_each_safe(psListEntry, psListEntryTemp, psList)
+                               {
+                                       GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
+                                       ged_log_buf_reset(psGEDLogBuf->ui32HashNodeID);
+                               }
+                               write_unlock_bh(&gsGEDLogBufList.sLock);
+                       }
+                       else if (strcmp(acBuffer, "profile_dvfs_enable") == 0)
+                       {
+                               ged_profile_dvfs_enable();
+                       }
+                       else if (strcmp(acBuffer, "profile_dvfs_disable") == 0)
+                       {
+                               ged_profile_dvfs_disable();
+                       }
+                       else if (strcmp(acBuffer, "profile_dvfs_start") == 0)
+                       {
+                               ged_profile_dvfs_start();
+                       }
+                       else if (strcmp(acBuffer, "profile_dvfs_stop") == 0)
+                       {
+                               ged_profile_dvfs_stop();
+                       }
+                       else if (sscanf(acBuffer, "profile_dvfs_ignore_lines %d", &i32Value) == 1)
+                       {
+                               ged_profile_dvfs_ignore_lines(i32Value);
+                       }
+                       //else if (...) //for other commands
+                       //{
+                       //}
+               }
+       }
+
+       return uiCount;
 }
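The commands parsed above arrive as plain strings written to the gedlog debugfs entry. A minimal user-space sketch; the /sys/kernel/debug mount point is an assumption, and the trailing newline is stripped by the acBuffer[uiCount - 1] = '\0' step:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumes debugfs is mounted at the usual location. */
            int fd = open("/sys/kernel/debug/gedlog", O_WRONLY);
            if (fd < 0)
                    return 1;
            /* Hits the "reset" branch above: clears every registered log buffer. */
            write(fd, "reset\n", strlen("reset\n"));
            close(fd);
            return 0;
    }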
 //-----------------------------------------------------------------------------
 static void* ged_log_seq_start(struct seq_file *psSeqFile, loff_t *puiPosition)
 {
-    struct list_head *psListEntry, *psListEntryTemp, *psList;
-    loff_t uiCurrentPosition = 0;
-
-    read_lock_bh(&gsGEDLogBufList.sLock);
-
-    psList = &gsGEDLogBufList.sList;
-    list_for_each_safe(psListEntry, psListEntryTemp, psList)
-    {
-        GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
-        if (psGEDLogBuf->acName[0] != '\0')
-        {
-            if (uiCurrentPosition == *puiPosition)
-            {
-                return psGEDLogBuf;
-            }
-            uiCurrentPosition ++;
-        }
-    }
-
-    return NULL;
+       struct list_head *psListEntry, *psListEntryTemp, *psList;
+       loff_t uiCurrentPosition = 0;
+
+       read_lock_bh(&gsGEDLogBufList.sLock);
+
+       psList = &gsGEDLogBufList.sList_buf;
+       list_for_each_safe(psListEntry, psListEntryTemp, psList)
+       {
+               GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
+               if (psGEDLogBuf->acName[0] != '\0')
+               {
+                       if (uiCurrentPosition == *puiPosition)
+                       {
+                               return psGEDLogBuf;
+                       }
+                       uiCurrentPosition ++;
+               }
+       }
+
+       return NULL;
 }
 //-----------------------------------------------------------------------------
 static void ged_log_seq_stop(struct seq_file *psSeqFile, void *pvData)
 {
-    read_unlock_bh(&gsGEDLogBufList.sLock);
+       read_unlock_bh(&gsGEDLogBufList.sLock);
 }
 //-----------------------------------------------------------------------------
 static void* ged_log_seq_next(struct seq_file *psSeqFile, void *pvData, loff_t *puiPosition)
 {
-    struct list_head *psListEntry, *psListEntryTemp, *psList;
+       struct list_head *psListEntry, *psListEntryTemp, *psList;
        loff_t uiCurrentPosition = 0;
 
        (*puiPosition)++;
 
-    psList = &gsGEDLogBufList.sList;
-    list_for_each_safe(psListEntry, psListEntryTemp, psList)
-    {
-        GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
-        if (psGEDLogBuf->acName[0] != '\0')
-        {
-            if (uiCurrentPosition == *puiPosition)
-            {
-                return psGEDLogBuf;
-            }
-            uiCurrentPosition ++;
-        }
-    }
-
-    return NULL;
-}
-//-----------------------------------------------------------------------------
-static int ged_log_seq_show(struct seq_file *psSeqFile, void *pvData)
-{
-    GED_LOG_BUF *psGEDLogBuf = (GED_LOG_BUF *)pvData;
-
-       if (psGEDLogBuf != NULL)
+       psList = &gsGEDLogBufList.sList_buf;
+       list_for_each_safe(psListEntry, psListEntryTemp, psList)
        {
-        char *buf;
-        int i, i32Count;
-
-        spin_lock_irqsave(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
-
-        seq_printf(psSeqFile, "---------- %s ----------\n", psGEDLogBuf->acName);
-
-        if (GED_LOG_BUF_TYPE_RINGBUFFER == psGEDLogBuf->eType)
-        {
-            i32Count = psGEDLogBuf->i32LineValidCount;
-            i = psGEDLogBuf->i32LineCurrent - i32Count;
-            if (i < 0)
-            {
-                i += psGEDLogBuf->i32LineCount;
-                buf = psGEDLogBuf->pcBuffer + i * psGEDLogBuf->i32LineBufferSize;
-                while ((i32Count > 0) && (i < psGEDLogBuf->i32LineCount))
-                {
-                    if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                    {
-                       break;
-                    }
-                    buf += psGEDLogBuf->i32LineBufferSize;
-                    i32Count --;
-                    i ++;
-                }
-            }
-            buf = psGEDLogBuf->pcBuffer;
-            while (i32Count > 0)
-            {
-                if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                {
-                    break;
-                }
-                buf += psGEDLogBuf->i32LineBufferSize;
-                i32Count --;
-            }
-        }
-        else if ((GED_LOG_BUF_TYPE_QUEUEBUFFER == psGEDLogBuf->eType) || 
-                 (GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE == psGEDLogBuf->eType))
-        {
-            i32Count = psGEDLogBuf->i32LineValidCount;
-            buf = psGEDLogBuf->pcBuffer;
-            for (i = 0; i < i32Count; ++i)
-            {
-                if (0 != seq_printf(psSeqFile, "%s\n", buf))
-                {
-                    break;
-                }
-                buf += psGEDLogBuf->i32LineBufferSize;
-            }
-        }
-
-        spin_unlock_irqrestore(&psGEDLogBuf->sSpinLock, psGEDLogBuf->ui32IRQFlags);
+               GED_LOG_BUF* psGEDLogBuf = (GED_LOG_BUF*)list_entry(psListEntry, GED_LOG_BUF, sList);
+               if (psGEDLogBuf->acName[0] != '\0')
+               {
+                       if (uiCurrentPosition == *puiPosition)
+                       {
+                               return psGEDLogBuf;
+                       }
+                       uiCurrentPosition ++;
+               }
        }
 
-       return 0;
+       return NULL;
 }
 //-----------------------------------------------------------------------------
 static struct seq_operations gsGEDLogReadOps = 
@@ -692,68 +916,73 @@ static struct seq_operations gsGEDLogReadOps =
        .start = ged_log_seq_start,
        .stop = ged_log_seq_stop,
        .next = ged_log_seq_next,
-       .show = ged_log_seq_show,
+       .show = ged_log_buf_seq_show,
 };
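These ops follow the usual seq_file contract: start() positions the iterator (and here takes the list read lock), show() emits one element, next() advances, and stop() (here dropping the lock) ends the pass. A self-contained user-space analogue of that loop, with buffer names borrowed from the ones allocated later in this commit:

    #include <stdio.h>

    /* User-space analogue of the seq_file contract relied on above. */
    static const char *names[] = { "GED", "HWC", "FENCE" };
    static const long N = sizeof(names) / sizeof(names[0]);

    static const char *seq_start(long *pos) { return *pos < N ? names[*pos] : NULL; }
    static const char *seq_next(long *pos)  { ++*pos; return *pos < N ? names[*pos] : NULL; }

    int main(void)
    {
            long pos = 0;
            for (const char *p = seq_start(&pos); p; p = seq_next(&pos))
                    printf("---------- %s ----------\n", p); /* show() step */
            return 0; /* stop() would release the lock in the driver */
    }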
 //-----------------------------------------------------------------------------
 GED_ERROR ged_log_system_init(void)
 {
-    GED_ERROR err = GED_OK;
-
-       INIT_LIST_HEAD(&gsGEDLogBufList.sList);
-       rwlock_init(&gsGEDLogBufList.sLock);
-
-    err = ged_debugFS_create_entry(
-            "gedlog",
-            NULL,
-            &gsGEDLogReadOps,
-            ged_log_write_entry,
-            NULL,
-            &gpsGEDLogEntry);
-
-    if (unlikely(err != GED_OK))
-    {
-        GED_LOGE("ged: failed to create gedlog entry!\n");
-        goto ERROR;
-    }
-
-    err = ged_debugFS_create_entry_dir(
-            "logbufs",
-            NULL,
-            &gpsGEDLogBufsDir);
-
-    if (unlikely(err != GED_OK))
-    {
-        err = GED_ERROR_FAIL;
-        GED_LOGE("ged: failed to create logbufs dir!\n");
-        goto ERROR;
-    }
-
-    ghHashTable = ged_hashtable_create(5);
-    if (!ghHashTable) 
-    {
-        err = GED_ERROR_OOM;
-        GED_LOGE("ged: failed to create a hash table!\n");
-        goto ERROR;
-    }
-
-    return err;
+       GED_ERROR err = GED_OK;
+
+       err = ged_debugFS_create_entry(
+                       "gedlog",
+                       NULL,
+                       &gsGEDLogReadOps,
+                       ged_log_write_entry,
+                       NULL,
+                       &gpsGEDLogEntry);
+
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create gedlog entry!\n");
+               goto ERROR;
+       }
+
+       err = ged_debugFS_create_entry_dir(
+                       "logbufs",
+                       NULL,
+                       &gpsGEDLogBufsDir);
+
+       if (unlikely(err != GED_OK))
+       {
+               err = GED_ERROR_FAIL;
+               GED_LOGE("ged: failed to create logbufs dir!\n");
+               goto ERROR;
+       }
+
+       ghHashTable = ged_hashtable_create(5);
+       if (!ghHashTable) 
+       {
+               err = GED_ERROR_OOM;
+               GED_LOGE("ged: failed to create a hash table!\n");
+               goto ERROR;
+       }
+
+       return err;
 
 ERROR:
 
-    ged_log_system_exit();
+       ged_log_system_exit();
 
-    return err;
+       return err;
 }
 //-----------------------------------------------------------------------------
 void ged_log_system_exit(void)
 {
-    ged_hashtable_destroy(ghHashTable);
+       ged_hashtable_destroy(ghHashTable);
 
-    ged_debugFS_remove_entry(gpsGEDLogEntry);
+       ged_debugFS_remove_entry(gpsGEDLogEntry);
 }
 //-----------------------------------------------------------------------------
 int ged_log_buf_write(GED_LOG_BUF_HANDLE hLogBuf, const char __user *pszBuffer, int i32Count)
 {
-    GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
-    return __ged_log_buf_write(psGEDLogBuf, pszBuffer, i32Count);
+       GED_LOG_BUF *psGEDLogBuf = ged_log_buf_from_handle(hLogBuf);
+       return __ged_log_buf_write(psGEDLogBuf, pszBuffer, i32Count);
 }
+
+EXPORT_SYMBOL(ged_log_buf_alloc);
+EXPORT_SYMBOL(ged_log_buf_reset);
+EXPORT_SYMBOL(ged_log_buf_get);
+EXPORT_SYMBOL(ged_log_buf_get_early);
+EXPORT_SYMBOL(ged_log_buf_free);
+EXPORT_SYMBOL(ged_log_buf_print);
+EXPORT_SYMBOL(ged_log_buf_print2);
index bca785bc4c5e0ce22eaccccfbb4f23cd867f7296..bf563493de104422b12ec316c77154cfb8763a37 100644 (file)
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
-#include <linux/disp_assert_layer.h>
-#include <mach/system.h>
+//#include <mach/system.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/semaphore.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/aee.h>
+#include <mt-plat/aee.h>
 
 #include "ged_debugFS.h"
 #include "ged_log.h"
+#include "ged_hal.h"
 #include "ged_bridge.h"
 #include "ged_profile_dvfs.h"
 #include "ged_monitor_3D_fence.h"
+#include "ged_notify_sw_vsync.h"
+#include "ged_dvfs.h"
+
 
 #define GED_DRIVER_DEVICE_NAME "ged"
 
@@ -45,6 +48,16 @@ static GED_LOG_BUF_HANDLE ghLogBuf_GLES = 0;
 GED_LOG_BUF_HANDLE ghLogBuf_GED = 0;
 #endif
 
+#define GED_LOG_BUF_COMMON_HWC "HWC"
+static GED_LOG_BUF_HANDLE ghLogBuf_HWC = 0;
+#define GED_LOG_BUF_COMMON_FENCE "FENCE"
+static GED_LOG_BUF_HANDLE ghLogBuf_FENCE = 0;
+
+GED_LOG_BUF_HANDLE ghLogBuf_DVFS = 0;
+GED_LOG_BUF_HANDLE ghLogBuf_ged_srv = 0;
+
 static void* gvIOCTLParamBuf = NULL;
 
 /******************************************************************************
@@ -52,172 +65,177 @@ static void* gvIOCTLParamBuf = NULL;
  *****************************************************************************/
 static int ged_open(struct inode *inode, struct file *filp)
 {
-    GED_LOGE("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
-    return 0;
+       GED_LOGE("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
+       return 0;
 }
 
 static int ged_release(struct inode *inode, struct file *filp)
 {
-    GED_LOGE("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
-    return 0;
+       GED_LOGE("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
+       return 0;
 }
 
 static unsigned int ged_poll(struct file *file, struct poll_table_struct *ptable)
 {
-    return 0;
+       return 0;
 }
 
 static ssize_t ged_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
 {
-    return 0;
+       return 0;
 }
 
 static ssize_t ged_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
 {
-    return 0;
+       return 0;
 }
 
 static long ged_dispatch(GED_BRIDGE_PACKAGE *psBridgePackageKM)
 {
-    int ret = -EFAULT;
-    void *pvInt, *pvOut;
-    typedef int (ged_bridge_func_type)(void*, void*);
-    ged_bridge_func_type* pFunc = NULL;
-    
-    /* We make sure the both size and the sum of them are GE 0 integer.
-     * The sum will not overflow to zero, because we will get zero from two GE 0 integers
-     * if and only if they are both zero in a 2's complement numeral system.
-     * That is: if overflow happen, the sum will be a negative number.
-     */
-    if (psBridgePackageKM->i32InBufferSize >= 0 && psBridgePackageKM->i32OutBufferSize >= 0
-        && psBridgePackageKM->i32InBufferSize + psBridgePackageKM->i32OutBufferSize >= 0
-        && psBridgePackageKM->i32InBufferSize + psBridgePackageKM->i32OutBufferSize
-        < GED_IOCTL_PARAM_BUF_SIZE)
-    {
-        pvInt = gvIOCTLParamBuf;
-        pvOut = (void*)((char*)pvInt + (uintptr_t)psBridgePackageKM->i32InBufferSize);
-        if (psBridgePackageKM->i32InBufferSize > 0)
-        {
-            if (0 != ged_copy_from_user(pvInt, psBridgePackageKM->pvParamIn, psBridgePackageKM->i32InBufferSize))
-            {
-                GED_LOGE("ged_copy_from_user fail\n");
-                return ret;
-            }
-        }
-
-        // we will change the below switch into a function pointer mapping table in the future
-        switch(GED_GET_BRIDGE_ID(psBridgePackageKM->ui32FunctionID))
-        {
-        case GED_BRIDGE_COMMAND_LOG_BUF_GET:
-            pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_get;
-            break;
-        case GED_BRIDGE_COMMAND_LOG_BUF_WRITE:
-            pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_write;
-            break;
-        case GED_BRIDGE_COMMAND_LOG_BUF_RESET:
-            pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_reset;
-            break;            
-        case GED_BRIDGE_COMMAND_BOOST_GPU_FREQ:
-            pFunc = (ged_bridge_func_type*)ged_bridge_boost_gpu_freq;
-            break;
-        case GED_BRIDGE_COMMAND_MONITOR_3D_FENCE:
-            pFunc = (ged_bridge_func_type*)ged_bridge_monitor_3D_fence;
-            break;
-        default:
-            GED_LOGE("Unknown Bridge ID: %u\n", GED_GET_BRIDGE_ID(psBridgePackageKM->ui32FunctionID));
-            break;
-        }
-
-        if (pFunc)
-        {
-            ret = pFunc(pvInt, pvOut);
-        }
-
-        if (psBridgePackageKM->i32OutBufferSize > 0)
-        {
-            if (0 != ged_copy_to_user(psBridgePackageKM->pvParamOut, pvOut, psBridgePackageKM->i32OutBufferSize))
-            {
-                return ret;
-            }
-        }
-    }
-
-    return ret;
+       int ret = -EFAULT;
+       void *pvInt, *pvOut;
+       typedef int (ged_bridge_func_type)(void*, void*);
+       ged_bridge_func_type* pFunc = NULL;
+
+	/* Both sizes must be non-negative, and their sum must neither
+	 * overflow to a negative value nor exceed the IOCTL param buffer. */
+	if ((psBridgePackageKM->i32InBufferSize >= 0) && (psBridgePackageKM->i32OutBufferSize >= 0) &&
+			(psBridgePackageKM->i32InBufferSize + psBridgePackageKM->i32OutBufferSize >= 0) &&
+			(psBridgePackageKM->i32InBufferSize + psBridgePackageKM->i32OutBufferSize < GED_IOCTL_PARAM_BUF_SIZE))
+       {
+               pvInt = gvIOCTLParamBuf;
+               pvOut = (void*)((char*)pvInt + (uintptr_t)psBridgePackageKM->i32InBufferSize);
+               if (psBridgePackageKM->i32InBufferSize > 0)
+               {
+                       if (0 != ged_copy_from_user(pvInt, psBridgePackageKM->pvParamIn, psBridgePackageKM->i32InBufferSize))
+                       {
+                               GED_LOGE("ged_copy_from_user fail\n");
+                               return ret;
+                       }
+               }
+
+               // we will change the below switch into a function pointer mapping table in the future
+               switch(GED_GET_BRIDGE_ID(psBridgePackageKM->ui32FunctionID))
+               {
+                       case GED_BRIDGE_COMMAND_LOG_BUF_GET:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_get;
+                               break;
+                       case GED_BRIDGE_COMMAND_LOG_BUF_WRITE:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_write;
+                               break;
+                       case GED_BRIDGE_COMMAND_LOG_BUF_RESET:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_log_buf_reset;
+                               break;            
+                       case GED_BRIDGE_COMMAND_BOOST_GPU_FREQ:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_boost_gpu_freq;
+                               break;
+                       case GED_BRIDGE_COMMAND_MONITOR_3D_FENCE:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_monitor_3D_fence;
+                               break;
+                       case GED_BRIDGE_COMMAND_QUERY_INFO:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_query_info;
+                               break;            
+                       case GED_BRIDGE_COMMAND_NOTIFY_VSYNC:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_notify_vsync;
+                               break;
+                       case GED_BRIDGE_COMMAND_DVFS_PROBE:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_dvfs_probe;
+                               break;
+                       case GED_BRIDGE_COMMAND_DVFS_UM_RETURN:
+                               pFunc = (ged_bridge_func_type*)ged_bridge_dvfs_um_retrun;
+                               break;
+                       default:
+                               GED_LOGE("Unknown Bridge ID: %u\n", GED_GET_BRIDGE_ID(psBridgePackageKM->ui32FunctionID));
+                               break;
+               }
+
+               if (pFunc)
+               {
+                       ret = pFunc(pvInt, pvOut);
+               }
+
+               if (psBridgePackageKM->i32OutBufferSize > 0)
+               {
+                       if (0 != ged_copy_to_user(psBridgePackageKM->pvParamOut, pvOut, psBridgePackageKM->i32OutBufferSize))
+                       {
+                               return ret;
+                       }
+               }
+       }
+
+       return ret;
 }
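The comment inside ged_dispatch() anticipates replacing the switch with a function-pointer mapping table. A hypothetical sketch of that refactor, assuming the bridge command IDs are small consecutive integers and the ged_bridge_func_type typedef is hoisted from ged_dispatch() to file scope; ARRAY_SIZE is the standard kernel helper:

    /* Hypothetical dispatch table; the handler names are the ones dispatched above. */
    static ged_bridge_func_type *ged_bridge_dispatch[] = {
            [GED_BRIDGE_COMMAND_LOG_BUF_GET]      = (ged_bridge_func_type *)ged_bridge_log_buf_get,
            [GED_BRIDGE_COMMAND_LOG_BUF_WRITE]    = (ged_bridge_func_type *)ged_bridge_log_buf_write,
            [GED_BRIDGE_COMMAND_LOG_BUF_RESET]    = (ged_bridge_func_type *)ged_bridge_log_buf_reset,
            [GED_BRIDGE_COMMAND_BOOST_GPU_FREQ]   = (ged_bridge_func_type *)ged_bridge_boost_gpu_freq,
            [GED_BRIDGE_COMMAND_MONITOR_3D_FENCE] = (ged_bridge_func_type *)ged_bridge_monitor_3D_fence,
            [GED_BRIDGE_COMMAND_QUERY_INFO]       = (ged_bridge_func_type *)ged_bridge_query_info,
            [GED_BRIDGE_COMMAND_NOTIFY_VSYNC]     = (ged_bridge_func_type *)ged_bridge_notify_vsync,
            [GED_BRIDGE_COMMAND_DVFS_PROBE]       = (ged_bridge_func_type *)ged_bridge_dvfs_probe,
            [GED_BRIDGE_COMMAND_DVFS_UM_RETURN]   = (ged_bridge_func_type *)ged_bridge_dvfs_um_retrun,
    };

    /* Replaces the switch inside ged_dispatch(): */
    unsigned int ui32ID = GED_GET_BRIDGE_ID(psBridgePackageKM->ui32FunctionID);
    if (ui32ID < ARRAY_SIZE(ged_bridge_dispatch) && ged_bridge_dispatch[ui32ID])
            pFunc = ged_bridge_dispatch[ui32ID];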
 
 DEFINE_SEMAPHORE(ged_dal_sem);
 
 static long ged_ioctl(struct file *pFile, unsigned int ioctlCmd, unsigned long arg)
 {
-    int ret = -EFAULT;
+       int ret = -EFAULT;
        GED_BRIDGE_PACKAGE *psBridgePackageKM, *psBridgePackageUM = (GED_BRIDGE_PACKAGE*)arg;
        GED_BRIDGE_PACKAGE sBridgePackageKM;
 
-    if (down_interruptible(&ged_dal_sem) < 0) 
-    {
-        GED_LOGE("Fail to down ged_dal_sem\n");
-        return -ERESTARTSYS;
-    }
+       if (down_interruptible(&ged_dal_sem) < 0) 
+       {
+               GED_LOGE("Fail to down ged_dal_sem\n");
+               return -ERESTARTSYS;
+       }
 
        psBridgePackageKM = &sBridgePackageKM;
-    if (0 != ged_copy_from_user(psBridgePackageKM, psBridgePackageUM, sizeof(GED_BRIDGE_PACKAGE)))
-    {
-        GED_LOGE("Fail to ged_copy_from_user\n");
-        goto unlock_and_return;
-    }
+       if (0 != ged_copy_from_user(psBridgePackageKM, psBridgePackageUM, sizeof(GED_BRIDGE_PACKAGE)))
+       {
+               GED_LOGE("Fail to ged_copy_from_user\n");
+               goto unlock_and_return;
+       }
 
-    ret = ged_dispatch(psBridgePackageKM);
+       ret = ged_dispatch(psBridgePackageKM);
 
 unlock_and_return:
-    up(&ged_dal_sem);
+       up(&ged_dal_sem);
 
-    return ret;
+       return ret;
 }
 
 #ifdef CONFIG_COMPAT
 static long ged_ioctl_compat(struct file *pFile, unsigned int ioctlCmd, unsigned long arg)
 {
-    typedef struct GED_BRIDGE_PACKAGE_32_TAG
-    {
-        unsigned int    ui32FunctionID;
-        int             i32Size;
-        unsigned int    ui32ParamIn;
-        int             i32InBufferSize;
-        unsigned int    ui32ParamOut;
-        int             i32OutBufferSize;
-    } GED_BRIDGE_PACKAGE_32;
-
-    int ret = -EFAULT;
-    GED_BRIDGE_PACKAGE sBridgePackageKM64;
-    GED_BRIDGE_PACKAGE_32 sBridgePackageKM32;
-    GED_BRIDGE_PACKAGE_32 *psBridgePackageKM32 = &sBridgePackageKM32;    
-    GED_BRIDGE_PACKAGE_32 *psBridgePackageUM32 = (GED_BRIDGE_PACKAGE_32*)arg;
-
-    if (down_interruptible(&ged_dal_sem) < 0) 
-    {
-        GED_LOGE("Fail to down ged_dal_sem\n");
-        return -ERESTARTSYS;
-    }
-
-    if (0 != ged_copy_from_user(psBridgePackageKM32, psBridgePackageUM32, sizeof(GED_BRIDGE_PACKAGE_32)))
-    {
-        GED_LOGE("Fail to ged_copy_from_user\n");
-        goto unlock_and_return;
-    }
-
-    sBridgePackageKM64.ui32FunctionID = psBridgePackageKM32->ui32FunctionID;
-    sBridgePackageKM64.i32Size = sizeof(GED_BRIDGE_PACKAGE);
-    sBridgePackageKM64.pvParamIn = (void*) ((size_t) psBridgePackageKM32->ui32ParamIn);
-    sBridgePackageKM64.pvParamOut = (void*) ((size_t) psBridgePackageKM32->ui32ParamOut);
-    sBridgePackageKM64.i32InBufferSize = psBridgePackageKM32->i32InBufferSize;
-    sBridgePackageKM64.i32OutBufferSize = psBridgePackageKM32->i32OutBufferSize;
-    
-    ret = ged_dispatch(&sBridgePackageKM64);
-    
+       typedef struct GED_BRIDGE_PACKAGE_32_TAG
+       {
+               unsigned int    ui32FunctionID;
+               int             i32Size;
+               unsigned int    ui32ParamIn;
+               int             i32InBufferSize;
+               unsigned int    ui32ParamOut;
+               int             i32OutBufferSize;
+       } GED_BRIDGE_PACKAGE_32;
+
+       int ret = -EFAULT;
+       GED_BRIDGE_PACKAGE sBridgePackageKM64;
+       GED_BRIDGE_PACKAGE_32 sBridgePackageKM32;
+       GED_BRIDGE_PACKAGE_32 *psBridgePackageKM32 = &sBridgePackageKM32;    
+       GED_BRIDGE_PACKAGE_32 *psBridgePackageUM32 = (GED_BRIDGE_PACKAGE_32*)arg;
+
+       if (down_interruptible(&ged_dal_sem) < 0) 
+       {
+               GED_LOGE("Fail to down ged_dal_sem\n");
+               return -ERESTARTSYS;
+       }
+
+       if (0 != ged_copy_from_user(psBridgePackageKM32, psBridgePackageUM32, sizeof(GED_BRIDGE_PACKAGE_32)))
+       {
+               GED_LOGE("Fail to ged_copy_from_user\n");
+               goto unlock_and_return;
+       }
+
+       sBridgePackageKM64.ui32FunctionID = psBridgePackageKM32->ui32FunctionID;
+       sBridgePackageKM64.i32Size = sizeof(GED_BRIDGE_PACKAGE);
+       sBridgePackageKM64.pvParamIn = (void*) ((size_t) psBridgePackageKM32->ui32ParamIn);
+       sBridgePackageKM64.pvParamOut = (void*) ((size_t) psBridgePackageKM32->ui32ParamOut);
+       sBridgePackageKM64.i32InBufferSize = psBridgePackageKM32->i32InBufferSize;
+       sBridgePackageKM64.i32OutBufferSize = psBridgePackageKM32->i32OutBufferSize;
+
+       ret = ged_dispatch(&sBridgePackageKM64);
+
 unlock_and_return:
-    up(&ged_dal_sem);
-    
-    return ret;
+       up(&ged_dal_sem);
+
+       return ret;
 }
 #endif
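The CONFIG_COMPAT path works because a 32-bit client passes its buffers as 32-bit integers (ui32ParamIn/ui32ParamOut), which ged_ioctl_compat() zero-extends into kernel-usable pointers before reusing the common ged_dispatch(). A self-contained sketch of that widening step:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* A 32-bit user-space address as carried in GED_BRIDGE_PACKAGE_32. */
            uint32_t ui32ParamIn = 0xBEEF0000u;

            /* Zero-extend via size_t, exactly as the compat handler does above. */
            void *pvParamIn = (void *)((size_t)ui32ParamIn);

            printf("%p\n", pvParamIn);
            return 0;
    }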
 
@@ -226,101 +244,152 @@ unlock_and_return:
  *****************************************************************************/
 
 static struct file_operations ged_fops = {
-    .owner = THIS_MODULE,
-    .open = ged_open,
-    .release = ged_release,
-    .poll = ged_poll,
-    .read = ged_read,
-    .write = ged_write,
-    .unlocked_ioctl = ged_ioctl,
+       .owner = THIS_MODULE,
+       .open = ged_open,
+       .release = ged_release,
+       .poll = ged_poll,
+       .read = ged_read,
+       .write = ged_write,
+       .unlocked_ioctl = ged_ioctl,
 #ifdef CONFIG_COMPAT
-    .compat_ioctl = ged_ioctl_compat,
+       .compat_ioctl = ged_ioctl_compat,
 #endif
 };
 
 #if 0
 static struct miscdevice ged_dev = {
-    .minor = MISC_DYNAMIC_MINOR,
-    .name = "ged",
-    .fops = &ged_fops,
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "ged",
+       .fops = &ged_fops,
 };
 #endif
 
 static void ged_exit(void)
 {
+#ifdef GED_DVFS_DEBUG_BUF
+       ged_log_buf_free(ghLogBuf_DVFS);
+       ged_log_buf_free(ghLogBuf_ged_srv);
+       ghLogBuf_DVFS = 0;
+       ghLogBuf_ged_srv = 0;
+#endif   
 #ifdef GED_DEBUG
-    ged_log_buf_free(ghLogBuf_GED);
-    ghLogBuf_GED = 0;
-
-    ged_log_buf_free(ghLogBuf_GLES);
-    ghLogBuf_GLES = 0;
+       ged_log_buf_free(ghLogBuf_GED);
+       ghLogBuf_GED = 0;
+       ged_log_buf_free(ghLogBuf_GLES);
+       ghLogBuf_GLES = 0;
 #endif
+       ged_log_buf_free(ghLogBuf_FENCE);
+       ghLogBuf_FENCE = 0;
+       ged_log_buf_free(ghLogBuf_HWC);
+       ghLogBuf_HWC = 0;
+       
+       ged_dvfs_system_exit();
+
+       ged_profile_dvfs_exit();
+
+       //ged_notify_vsync_system_exit();
 
-    ged_profile_dvfs_exit();
+       ged_notify_sw_vsync_system_exit();
 
-    ged_log_system_exit();
+       ged_hal_exit();
 
-    ged_debugFS_exit();
+       ged_log_system_exit();
 
-    remove_proc_entry(GED_DRIVER_DEVICE_NAME, NULL);
+       ged_debugFS_exit();
 
-    if (gvIOCTLParamBuf)
-    {
-        vfree(gvIOCTLParamBuf);
-        gvIOCTLParamBuf = NULL;
-    }
+       remove_proc_entry(GED_DRIVER_DEVICE_NAME, NULL);
+
+       if (gvIOCTLParamBuf)
+       {
+               vfree(gvIOCTLParamBuf);
+               gvIOCTLParamBuf = NULL;
+       }
 }
 
 static int ged_init(void)
 {
-    GED_ERROR err = GED_ERROR_FAIL;
-
-    gvIOCTLParamBuf = vmalloc(GED_IOCTL_PARAM_BUF_SIZE);
-    if (NULL == gvIOCTLParamBuf)
-    {
-        err = GED_ERROR_OOM;
-        goto ERROR;
-    }
-
-    if (NULL == proc_create(GED_DRIVER_DEVICE_NAME, 0644, NULL, &ged_fops))
-    {
-        err = GED_ERROR_FAIL;
-        GED_LOGE("ged: failed to register ged proc entry!\n");
-        goto ERROR;
-    }
-
-    err = ged_debugFS_init();
-    if (unlikely(err != GED_OK))
-    {
-        GED_LOGE("ged: failed to init debug FS!\n");
-        goto ERROR;
-    }
-
-    err = ged_log_system_init();
-    if (unlikely(err != GED_OK))
-    {
-        GED_LOGE("ged: failed to create gedlog entry!\n");
-        goto ERROR;
-    }
-
-    err = ged_profile_dvfs_init();
-    if (unlikely(err != GED_OK))
-    {
-        GED_LOGE("ged: failed to init profile dvfs!\n");
-        goto ERROR;
-    }
+       GED_ERROR err = GED_ERROR_FAIL;
+
+       gvIOCTLParamBuf = vmalloc(GED_IOCTL_PARAM_BUF_SIZE);
+       if (NULL == gvIOCTLParamBuf)
+       {
+               err = GED_ERROR_OOM;
+               goto ERROR;
+       }
+
+       if (NULL == proc_create(GED_DRIVER_DEVICE_NAME, 0644, NULL, &ged_fops))
+       {
+               err = GED_ERROR_FAIL;
+               GED_LOGE("ged: failed to register ged proc entry!\n");
+               goto ERROR;
+       }
+
+       err = ged_debugFS_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to init debug FS!\n");
+               goto ERROR;
+       }
+
+       err = ged_log_system_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create gedlog entry!\n");
+               goto ERROR;
+       }
+
+       err = ged_hal_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to create hal entry!\n");
+               goto ERROR;
+       }
+
+       err = ged_notify_sw_vsync_system_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to init notify sw vsync!\n");
+               goto ERROR;
+       }
+
+       err = ged_profile_dvfs_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to init profile dvfs!\n");
+               goto ERROR;
+       }
+
+
+       err = ged_dvfs_system_init();
+       if (unlikely(err != GED_OK))
+       {
+               GED_LOGE("ged: failed to init common dvfs!\n");
+               goto ERROR;
+       }
+
 
 #ifdef GED_DEBUG
-    ghLogBuf_GLES = ged_log_buf_alloc(160, 128, GED_LOG_BUF_TYPE_RINGBUFFER, GED_LOG_BUF_COMMON_GLES, NULL);
-    ghLogBuf_GED = ged_log_buf_alloc(32, 64, GED_LOG_BUF_TYPE_RINGBUFFER, "GED internal", NULL);
+       ghLogBuf_GLES = ged_log_buf_alloc(160, 128 * 160, GED_LOG_BUF_TYPE_RINGBUFFER, GED_LOG_BUF_COMMON_GLES, NULL);
+       ghLogBuf_GED = ged_log_buf_alloc(32, 64 * 32, GED_LOG_BUF_TYPE_RINGBUFFER, "GED internal", NULL);
+#endif
+       ghLogBuf_HWC = ged_log_buf_alloc(4096, 128 * 4096, GED_LOG_BUF_TYPE_RINGBUFFER, GED_LOG_BUF_COMMON_HWC, NULL);
+       ghLogBuf_FENCE = ged_log_buf_alloc(256, 128 * 256, GED_LOG_BUF_TYPE_RINGBUFFER, GED_LOG_BUF_COMMON_FENCE, NULL);
+       
+#ifdef GED_DVFS_DEBUG_BUF
+#ifdef GED_LOG_SIZE_LIMITED
+       ghLogBuf_DVFS =  ged_log_buf_alloc(20*60, 20*60*80, GED_LOG_BUF_TYPE_RINGBUFFER, "DVFS_Log", "ged_dvfs_debug_limited");
+#else
+       ghLogBuf_DVFS =  ged_log_buf_alloc(20*60*10, 20*60*10*80, GED_LOG_BUF_TYPE_RINGBUFFER, "DVFS_Log", "ged_dvfs_debug");
 #endif
+       ghLogBuf_ged_srv =  ged_log_buf_alloc(32, 32*80, GED_LOG_BUF_TYPE_RINGBUFFER, "ged_srv_Log", "ged_srv_debug");
+#endif    
 
-    return 0;
+       return 0;
 
 ERROR:
-    ged_exit();
+       ged_exit();
 
-    return -EFAULT;
+       return -EFAULT;
 }
 
 module_init(ged_init);
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_mm.c b/drivers/misc/mediatek/gpu/ged/src/ged_mm.c
new file mode 100644 (file)
index 0000000..caa4dda
--- /dev/null
@@ -0,0 +1,144 @@
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/genalloc.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+//#include <linux/xlog.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <mt-plat/mtk_gpu_utility.h>
+
+#include "ged_base.h"
+#include "ged_mm.h"
+#include "ged_debugFS.h"
+
+#include "ged_dvfs.h"
+
+static struct dentry* gpsMMDir = NULL;
+static struct dentry* gpsDvfsServiceData = NULL;
+
+GED_DVFS_POLICY_DATA* gpDVFSdata=NULL;
+
+void mmap_open(struct vm_area_struct *vma)
+{
+	/* nothing to do: the backing page already lives in vm_private_data;
+	 * this no-op also serves as the .close hook below */
+}
+
+static int mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct page *page;
+       struct mmap_info *info;
+	/* Legacy address check from the pre-.fault nopage API, kept for reference:
+	 *
+	 *	if (address > vma->vm_end) {
+	 *		printk("invalid address\n");
+	 *		return VM_FAULT_SIGBUS;
+	 *	}
+	 */
+
+	/* the data is in vma->vm_private_data */
+	info = (struct mmap_info *)vma->vm_private_data;
+	if (!info) {
+		printk("no data\n");
+		return VM_FAULT_SIGBUS;	/* was "return NULL": invalid for an int-returning fault handler */
+	}
+
+       /* get the page */
+       page = virt_to_page(info);
+
+       /* increment the reference count of this page */
+       get_page(page);
+       vmf->page = page;                                       //--changed
+       /* type is the page fault type */
+       /*if (type)
+        *type = VM_FAULT_MINOR;
+        */
+       return 0;
+}
+
+struct vm_operations_struct mmap_vm_ops = {
+       .open =     mmap_open,
+       .close =    mmap_open,
+       .fault =    mmap_fault,
+};
+
+
+int ged_dvfs_service_data_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       vma->vm_ops = &mmap_vm_ops;
+       vma->vm_flags |= VM_RESERVED;
+       /* assign the file private data to the vm private data */
+       vma->vm_private_data = filp->private_data;
+       mmap_open(vma);
+       return 0;
+}
+
+int ged_dvfs_service_data_close(struct inode *inode, struct file *filp)
+{
+	void *info = filp->private_data;
+	/* release the page allocated at open() time */
+	free_page((unsigned long)info);
+	gpDVFSdata = NULL;
+	filp->private_data = NULL;
+	return 0;
+}
+
+int ged_dvfs_service_data_open(struct inode *inode, struct file *filp)
+{
+	void *info;
+	/* obtain a zeroed page to back the shared DVFS policy data */
+	info = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+	gpDVFSdata = (GED_DVFS_POLICY_DATA *)info;
+	/* share the page with mmap() via the file's private data */
+	filp->private_data = info;
+	return 0;
+}
+
+static const struct file_operations gsDVFSServiceData = {
+       .open = ged_dvfs_service_data_open,
+       .release = ged_dvfs_service_data_close,
+       .mmap = ged_dvfs_service_data_mmap,
+};
+
+//-----------------------------------------------------------------------------
+
+GED_ERROR ged_mm_init(void)
+{
+       GED_ERROR err = GED_OK;
+
+       err = ged_debugFS_create_entry_dir(
+                       "mm",
+                       NULL,
+                       &gpsMMDir);
+
+       if (unlikely(err != GED_OK))
+       {
+               err = GED_ERROR_FAIL;
+               GED_LOGE("ged: failed to create mm dir!\n");
+               goto ERROR;
+       }
+
+
+       gpsDvfsServiceData = debugfs_create_file("ged_dvfs_service_data", 0644, gpsMMDir, NULL, &gsDVFSServiceData);
+
+
+       return err;
+
+ERROR:
+
+       ged_mm_exit();
+
+       return err;
+}
+//-----------------------------------------------------------------------------
+void ged_mm_exit(void)
+{
+       debugfs_remove(gpsDvfsServiceData);
+       ged_debugFS_remove_entry_dir(gpsMMDir);
+}
+//-----------------------------------------------------------------------------
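A user-space sketch of consuming the page exposed above; the debugfs mount point is an assumption, and the mapping length matches the single page handed out by get_zeroed_page():

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path assumes the default debugfs mount point. */
            int fd = open("/sys/kernel/debug/mm/ged_dvfs_service_data", O_RDWR);
            if (fd < 0)
                    return 1;

            /* One page, matching the driver-side allocation. */
            void *p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* p now aliases the kernel's GED_DVFS_POLICY_DATA page. */
            munmap(p, getpagesize());
            close(fd);
            return 0;
    }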
index a736d084032243142fc829628645c2b1dd1e8a99..4057441d8f32114d7fe1a45b876a7221b12d9a4c 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <asm/atomic.h>
+#include <linux/module.h>
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
 #include <linux/sync.h>
 #include <../drivers/staging/android/sync.h>
 #endif
 
-#include <linux/mtk_gpu_utility.h>
+#include <mt-plat/mtk_gpu_utility.h>
+#include <trace/events/gpu.h>
+#ifdef GED_DVFS_ENABLE
+#include <mt_gpufreq.h>
+#endif
+
 #include "ged_monitor_3D_fence.h"
+
 #include "ged_log.h"
 #include "ged_base.h"
+#include "ged_type.h"
+#include "ged_dvfs.h"
+
+#include <asm/div64.h>
 
 static atomic_t g_i32Count = ATOMIC_INIT(0);
+static unsigned int ged_monitor_3D_fence_debug = 0;
+static unsigned int ged_monitor_3D_fence_disable = 0;
+static unsigned int ged_monitor_3D_fence_systrace = 0;
+static unsigned long g_ul3DFenceDoneTime = 0;
+
+
+extern bool mtk_get_bottom_gpu_freq(unsigned int *pui32FreqLevel);
 
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
 extern GED_LOG_BUF_HANDLE ghLogBuf_GED;
 #endif
+extern GED_LOG_BUF_HANDLE ghLogBuf_DVFS;
 
 typedef struct GED_MONITOR_3D_FENCE_TAG
 {
-    struct sync_fence_waiter    sSyncWaiter;
+       struct sync_fence_waiter    sSyncWaiter;
        struct work_struct          sWork;
-    struct sync_fence*          psSyncFence;
+       struct sync_fence*          psSyncFence;
 } GED_MONITOR_3D_FENCE;
 
 static void ged_sync_cb(struct sync_fence *fence, struct sync_fence_waiter *waiter)
 {
        GED_MONITOR_3D_FENCE *psMonitor;
+       unsigned long long t;
+
+       t = ged_get_time();
+
+
+       do_div(t,1000);
+
+       ged_monitor_3D_fence_notify();
+       ged_dvfs_cal_gpu_utilization_force();
        psMonitor = GED_CONTAINER_OF(waiter, GED_MONITOR_3D_FENCE, sSyncWaiter);
-    schedule_work(&psMonitor->sWork);
+
+       ged_log_buf_print(ghLogBuf_DVFS, "[-] ged_monitor_3D_fence_done (ts=%llu) %p", t, psMonitor->psSyncFence);
+
+       schedule_work(&psMonitor->sWork);
 }
 
 static void ged_monitor_3D_fence_work_cb(struct work_struct *psWork)
 {
        GED_MONITOR_3D_FENCE *psMonitor;
 
+
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-    ged_log_buf_print(ghLogBuf_GED, "ged_monitor_3D_fence_work_cb");
+       ged_log_buf_print(ghLogBuf_GED, "ged_monitor_3D_fence_work_cb");
 #endif
 
-    if (atomic_sub_return(1, &g_i32Count) < 1)
-    {
+       if (atomic_sub_return(1, &g_i32Count) < 1)
+       {
+               if (0 == ged_monitor_3D_fence_disable)
+               {
+                       unsigned int uiFreqLevelID;
+                       if (mtk_get_bottom_gpu_freq(&uiFreqLevelID))
+                       {
+                               if (uiFreqLevelID > 0)
+                               {
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-        ged_log_buf_print(ghLogBuf_GED, "mtk_set_bottom_gpu_freq(0)");
+                                       ged_log_buf_print(ghLogBuf_GED, "mtk_set_bottom_gpu_freq(0)");
+#endif
+                                       mtk_set_bottom_gpu_freq(0);
+#if 0
+#ifdef CONFIG_MTK_SCHED_TRACERS
+                                       if (ged_monitor_3D_fence_systrace)
+                                       {
+                                               unsigned long long t = cpu_clock(smp_processor_id());
+                                               trace_gpu_sched_switch("Smart Boost", t, 0, 0, 1);
+                                       }
+#endif
 #endif
-        mtk_set_bottom_gpu_freq(0);
-    }
+                               }
+                       }
+               }
+       }
+
+       if (ged_monitor_3D_fence_debug > 0)
+       {
+               GED_LOGI("[-]3D fences count = %d\n", atomic_read(&g_i32Count));
+       }
 
        psMonitor = GED_CONTAINER_OF(psWork, GED_MONITOR_3D_FENCE, sWork);
-    sync_fence_put(psMonitor->psSyncFence);
-    ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
+       sync_fence_put(psMonitor->psSyncFence);
+       ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
+}
+
+unsigned long ged_monitor_3D_fence_done_time(void)
+{
+       return g_ul3DFenceDoneTime;
 }
 
 GED_ERROR ged_monitor_3D_fence_add(int fence_fd)
 {
-    int err;
-    GED_MONITOR_3D_FENCE* psMonitor = (GED_MONITOR_3D_FENCE*)ged_alloc(sizeof(GED_MONITOR_3D_FENCE));
+       int err;
+       unsigned long long t;
+       GED_MONITOR_3D_FENCE* psMonitor;
+
+       t = ged_get_time();
+
+       do_div(t,1000);
+
+       psMonitor = (GED_MONITOR_3D_FENCE*)ged_alloc(sizeof(GED_MONITOR_3D_FENCE));
 
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-    ged_log_buf_print(ghLogBuf_GED, "[+]ged_monitor_3D_fence_add");
+       ged_log_buf_print(ghLogBuf_GED, "[+]ged_monitor_3D_fence_add");
 #endif
 
-    if (!psMonitor)
-    {
-        return GED_ERROR_OOM;
-    }
+       if (!psMonitor)
+       {
+               return GED_ERROR_OOM;
+       }
 
-    sync_fence_waiter_init(&psMonitor->sSyncWaiter, ged_sync_cb);
-    INIT_WORK(&psMonitor->sWork, ged_monitor_3D_fence_work_cb);
-    psMonitor->psSyncFence = sync_fence_fdget(fence_fd);
-    if (NULL == psMonitor->psSyncFence)
-    {
-        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
-        return GED_ERROR_INVALID_PARAMS;
-    }
+       sync_fence_waiter_init(&psMonitor->sSyncWaiter, ged_sync_cb);
+       INIT_WORK(&psMonitor->sWork, ged_monitor_3D_fence_work_cb);
+       psMonitor->psSyncFence = sync_fence_fdget(fence_fd);
+       if (NULL == psMonitor->psSyncFence)
+       {
+               ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
+               return GED_ERROR_INVALID_PARAMS;
+       }
+
+       ged_log_buf_print(ghLogBuf_DVFS, "[+] ged_monitor_3D_fence_add (ts=%llu) %p", t, psMonitor->psSyncFence);
 
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-    ged_log_buf_print(ghLogBuf_GED, "[+]sync_fence_wait_async");
+       ged_log_buf_print(ghLogBuf_GED, "[+]sync_fence_wait_async");
 #endif
 
-    err = sync_fence_wait_async(psMonitor->psSyncFence, &psMonitor->sSyncWaiter);
+       err = sync_fence_wait_async(psMonitor->psSyncFence, &psMonitor->sSyncWaiter);
 
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-    ged_log_buf_print(ghLogBuf_GED, "[-]sync_fence_wait_async, err = %d", err);
+       ged_log_buf_print(ghLogBuf_GED, "[-]sync_fence_wait_async, err = %d", err);
 #endif
 
-    if ((1 == err) || (0 > err))
-    {
-        sync_fence_put(psMonitor->psSyncFence);
-        ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
-    }
-    else if (0 == err)
-    {
-        int iCount = atomic_add_return (1, &g_i32Count);
-        if (iCount > 1)
-        {
-            //mtk_set_bottom_gpu_freq(iCount + 1);
-            mtk_set_bottom_gpu_freq(4);
-        }
-    }
+       if ((1 == err) || (0 > err))
+       {
+               sync_fence_put(psMonitor->psSyncFence);
+               ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
+       }
+       else if (0 == err)
+       {
+               int iCount = atomic_add_return (1, &g_i32Count);
+               if (iCount > 1)
+               {
+                       if (0 == ged_monitor_3D_fence_disable)
+                       {
+                               unsigned int uiFreqLevelID;
+                               if (mtk_get_bottom_gpu_freq(&uiFreqLevelID))
+                               {
+#ifdef GED_DVFS_ENABLE
+                                       if (uiFreqLevelID != mt_gpufreq_get_dvfs_table_num() - 1)
+#else
+					if (uiFreqLevelID != 9999) // never true: disables the boost when GED_DVFS_ENABLE is off
+#endif
+                                               {
+#if 0
+#ifdef CONFIG_MTK_SCHED_TRACERS
+                                                       if (ged_monitor_3D_fence_systrace)
+                                                       {
+                                                               unsigned long long t = cpu_clock(smp_processor_id());
+                                                               trace_gpu_sched_switch("Smart Boost", t, 1, 0, 1);
+                                                       }
+#endif
+#endif
 
+#ifdef GED_DVFS_ENABLE
+                                                       mtk_set_bottom_gpu_freq(mt_gpufreq_get_dvfs_table_num() - 1);
+#endif
+                                               }
+                               }
+                       }
+               }
+       }
+
+       if (ged_monitor_3D_fence_debug > 0)
+       {
+               GED_LOGI("[+]3D fences count = %d\n", atomic_read(&g_i32Count));
+       }
 #ifdef GED_DEBUG_MONITOR_3D_FENCE
-    ged_log_buf_print(ghLogBuf_GED, "[-]ged_monitor_3D_fence_add, count = %d", atomic_read(&g_i32Count));
+       ged_log_buf_print(ghLogBuf_GED, "[-]ged_monitor_3D_fence_add, count = %d", atomic_read(&g_i32Count));
 #endif
 
-    return GED_OK;
+       return GED_OK;
 }
 
+void ged_monitor_3D_fence_set_disable(GED_BOOL bFlag)
+{
+       if(bFlag!=ged_monitor_3D_fence_disable)
+       {
+               ged_monitor_3D_fence_disable = bFlag;
+       }
+}
+
+void ged_monitor_3D_fence_notify(void)
+{
+       unsigned long long t;
+
+       t = ged_get_time();
+
+       do_div(t,1000);
+
+       g_ul3DFenceDoneTime = (unsigned long)t;
+}
+
+
+module_param(ged_monitor_3D_fence_debug, uint, 0644);
+module_param(ged_monitor_3D_fence_disable, uint, 0644);
+module_param(ged_monitor_3D_fence_systrace, uint, 0644);
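Distilled, the monitor implements a reference-counted frequency floor: each in-flight 3D fence bumps an atomic counter, a second concurrent fence raises the bottom GPU frequency, and the floor is released when the last fence signals. A self-contained sketch of the pattern; set_bottom_gpu_freq() stands in for the mtk_* HAL calls and the level value is illustrative:

    #include <stdatomic.h>

    /* Stand-in for the MTK HAL; illustrative only. */
    static void set_bottom_gpu_freq(unsigned int level) { (void)level; }

    static atomic_int fence_count;

    /* Cf. ged_monitor_3D_fence_add(): a second in-flight fence raises the floor. */
    static void on_fence_add(unsigned int top_level)
    {
            if (atomic_fetch_add(&fence_count, 1) + 1 > 1)
                    set_bottom_gpu_freq(top_level);
    }

    /* Cf. ged_monitor_3D_fence_work_cb(): last fence gone releases the floor. */
    static void on_fence_done(void)
    {
            if (atomic_fetch_sub(&fence_count, 1) - 1 < 1)
                    set_bottom_gpu_freq(0);
    }

    int main(void)
    {
            on_fence_add(4);
            on_fence_add(4);  /* second fence: floor raised */
            on_fence_done();
            on_fence_done();  /* count back to zero: floor released */
            return 0;
    }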
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_notify_sw_vsync.c b/drivers/misc/mediatek/gpu/ged/src/ged_notify_sw_vsync.c
new file mode 100644 (file)
index 0000000..f77c15d
--- /dev/null
@@ -0,0 +1,394 @@
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#include <linux/kernel.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+
+#include <asm/div64.h>
+
+#include <mt-plat/mtk_gpu_utility.h>
+#include "ged_notify_sw_vsync.h"
+#include "ged_log.h"
+#include "ged_base.h"
+#include "ged_monitor_3D_fence.h"
+
+#define GED_DVFS_TIMER_TIMEOUT 25000000
+
+static struct hrtimer g_HT_hwvsync_emu;
+
+#include "ged_dvfs.h"
+
+extern void (*mtk_gpu_sodi_entry_fp)(void);
+extern void (*mtk_gpu_sodi_exit_fp)(void);
+
+
+static struct workqueue_struct* g_psNotifyWorkQueue = NULL;
+
+static struct mutex gsVsyncStampLock;
+
+
+typedef struct GED_NOTIFY_SW_SYNC_TAG
+{
+       struct work_struct      sWork;
+       unsigned long t;
+       long phase;
+       unsigned long ul3DFenceDoneTime;
+} GED_NOTIFY_SW_SYNC;
+
+
+
+static void ged_notify_sw_sync_work_handle(struct work_struct *psWork)
+{
+       GED_NOTIFY_SW_SYNC* psNotify = GED_CONTAINER_OF(psWork, GED_NOTIFY_SW_SYNC, sWork);
+       if (psNotify)
+       {
+               ged_dvfs_run(psNotify->t, psNotify->phase, psNotify->ul3DFenceDoneTime);
+               ged_free(psNotify, sizeof(GED_NOTIFY_SW_SYNC));
+       }
+}
+
+#define GED_VSYNC_MISS_QUANTUM_NS 16666666
+extern GED_LOG_BUF_HANDLE ghLogBuf_DVFS;
+
+static unsigned long long sw_vsync_ts;
+#ifdef ENABLE_COMMON_DVFS      
+static unsigned long long hw_vsync_ts;
+#endif
+static unsigned long long g_ns_gpu_on_ts=0;
+
+static bool g_timer_on = false;
+static unsigned long long g_timer_on_ts=0;
+
+static bool g_bGPUClock = false;
+
+/*
+ * void timer_switch(bool bTock)
+ * Only records the timer status; it does not operate on the real timer.
+ */
+void timer_switch(bool bTock)
+{
+       mutex_lock(&gsVsyncStampLock);
+       g_timer_on = bTock;
+       if(bTock)
+       {
+               g_timer_on_ts = ged_get_time();
+       }
+       mutex_unlock(&gsVsyncStampLock);                
+}
+
+void timer_switch_locked(bool bTock)
+{
+       g_timer_on = bTock;
+       if(bTock)
+       {
+               g_timer_on_ts = ged_get_time();
+       }
+}
+
+
+static void ged_timer_switch_work_handle(struct work_struct *psWork)
+{
+       GED_NOTIFY_SW_SYNC* psNotify = GED_CONTAINER_OF(psWork, GED_NOTIFY_SW_SYNC, sWork);
+       if (psNotify)
+       {
+               timer_switch(false);
+               ged_free(psNotify, sizeof(GED_NOTIFY_SW_SYNC));
+       }
+}
+
+extern unsigned int g_gpu_timer_based_emu;
+GED_ERROR ged_notify_sw_vsync(GED_VSYNC_TYPE eType, GED_DVFS_UM_QUERY_PACK* psQueryData)
+{
+#ifdef ENABLE_COMMON_DVFS      
+
+       long long llDiff = 0;
+       bool bHWEventKick = false;
+       unsigned long long temp;
+
+       unsigned long t;
+       long phase = 0;
+
+       temp = ged_get_time();
+
+       if(g_gpu_timer_based_emu)
+       {
+               ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] Vsync ignored (ts=%llu)", temp);
+               return GED_INTENTIONAL_BLOCK;
+       }
+
+
+
+       /*critical session begin*/
+       mutex_lock(&gsVsyncStampLock);
+
+       if(GED_VSYNC_SW_EVENT==eType)
+       {
+               sw_vsync_ts = temp;
+#ifdef ENABLE_TIMER_BACKUP                             
+		if(hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(GED_DVFS_TIMER_TIMEOUT), HRTIMER_MODE_REL)) // nonzero: timer was already active
+               {
+                       hrtimer_try_to_cancel ( &g_HT_hwvsync_emu );
+                       hrtimer_restart(&g_HT_hwvsync_emu);
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] Timer Restart (ts=%llu)", temp);
+               }
+		else // zero: timer was idle and has just been armed
+               {
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] New Timer Start (ts=%llu)", temp);
+                       timer_switch_locked(true);
+               }                               
+
+#endif                         
+       }
+       else
+       {
+               hw_vsync_ts = temp;
+
+               llDiff = (long long)(hw_vsync_ts - sw_vsync_ts);
+
+               if(llDiff > GED_VSYNC_MISS_QUANTUM_NS)
+               {
+                       bHWEventKick = true;
+               }
+       }               
+#ifdef GED_DVFS_DEBUG          
+       if(GED_VSYNC_HW_EVENT==eType)
+       {
+               GED_LOGE("[5566] HW VSYNC: llDiff= %lld, hw_vsync_ts=%llu, sw_vsync_ts=%llu\n", llDiff, hw_vsync_ts, sw_vsync_ts);
+       }
+       else
+       {
+               GED_LOGE("[5566] SW VSYNC: llDiff= %lld, hw_vsync_ts=%llu, sw_vsync_ts=%llu\n", llDiff, hw_vsync_ts, sw_vsync_ts);
+       }
+#endif         
+
+
+       if(GED_VSYNC_HW_EVENT==eType)
+               ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] HW VSYNC (ts=%llu) ", hw_vsync_ts);
+       else
+               ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] SW VSYNC (ts=%llu) ", sw_vsync_ts);
+
+       mutex_unlock(&gsVsyncStampLock);
+       /*critical session end*/
+
+       if(GED_VSYNC_SW_EVENT==eType)
+       {
+               do_div(temp,1000);
+               t = (unsigned long)(temp);
+               ged_dvfs_run(t, phase, ged_monitor_3D_fence_done_time());
+               psQueryData->usT = t;
+               psQueryData-> ul3DFenceDoneTime = ged_monitor_3D_fence_done_time();
+               ged_dvfs_sw_vsync_query_data(psQueryData);
+       }               
+       else
+       {
+               if(bHWEventKick)
+               {
+#ifdef GED_DVFS_DEBUG          
+                       GED_LOGE("[5566] HW Event: kick!\n");
+#endif                                                         
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] HW VSync: mending kick!");
+                       ged_dvfs_run(0, 0, 0);
+               }
+       }
+
+#else
+#if 0
+       GED_NOTIFY_SW_SYNC* psNotify;
+       unsigned long long temp = cpu_clock(smp_processor_id());
+       *pt = (unsigned long)(temp / 1000);
+
+       psNotify = (GED_NOTIFY_SW_SYNC*)ged_alloc(sizeof(GED_NOTIFY_SW_SYNC));
+       if (!psNotify)
+       {
+               return GED_ERROR_OOM;
+       }
+
+       INIT_WORK(&psNotify->sWork, ged_notify_sw_sync_work_handle);
+       psNotify->t = *pt;
+       psNotify->phase = phase;
+       psNotify->ul3DFenceDoneTime = ged_monitor_3D_fence_done_time();
+       queue_work(g_psNotifyWorkQueue, &psNotify->sWork);
+#endif
+#endif
+
+       return GED_OK;
+}
+
+extern unsigned int gpu_loading;
+enum hrtimer_restart ged_sw_vsync_check_cb( struct hrtimer *timer )
+{
+       unsigned long long temp;
+       long long llDiff;
+       GED_NOTIFY_SW_SYNC* psNotify;
+
+	temp = cpu_clock(smp_processor_id()); // interrupt context: no need to guard against preemption
+
+       llDiff = (long long)(temp - sw_vsync_ts);
+
+       if(llDiff > GED_VSYNC_MISS_QUANTUM_NS)
+       {
+               psNotify = (GED_NOTIFY_SW_SYNC*)ged_alloc_atomic(sizeof(GED_NOTIFY_SW_SYNC));
+
+		if(false==g_bGPUClock && 0==gpu_loading && (temp - g_ns_gpu_on_ts > GED_DVFS_TIMER_TIMEOUT))
+               {
+                       if (psNotify)
+                       {
+                               INIT_WORK(&psNotify->sWork, ged_timer_switch_work_handle);      
+                               queue_work(g_psNotifyWorkQueue, &psNotify->sWork);
+                       }
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] Timer removed (ts=%llu) ", temp);                                             
+                       return HRTIMER_NORESTART;
+               }                               
+
+
+               if (psNotify)
+               {
+                       INIT_WORK(&psNotify->sWork, ged_notify_sw_sync_work_handle);
+                       psNotify->t = temp;
+                       do_div(psNotify->t,1000);
+                       psNotify->phase = GED_DVFS_FALLBACK;
+                       psNotify->ul3DFenceDoneTime = 0;
+                       queue_work(g_psNotifyWorkQueue, &psNotify->sWork);
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] Timer kick    (ts=%llu) ", temp);
+                       hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(GED_DVFS_TIMER_TIMEOUT), HRTIMER_MODE_REL);
+                       g_timer_on_ts = temp;
+               }
+       }       
+       return HRTIMER_NORESTART;
+}
+
+bool ged_gpu_power_on_notified = 0;
+bool ged_gpu_power_off_notified = 0;
+void ged_dvfs_gpu_clock_switch_notify(bool bSwitch)
+{
+#ifdef ENABLE_COMMON_DVFS
+#ifdef ENABLE_TIMER_BACKUP
+
+       if(bSwitch)
+       {                               
+               ged_gpu_power_on_notified = true;
+               g_ns_gpu_on_ts = ged_get_time();
+               g_bGPUClock = true;
+               if( g_timer_on )
+               {
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] Timer Already Start");
+               }
+               else
+               {
+                       hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(GED_DVFS_TIMER_TIMEOUT), HRTIMER_MODE_REL);
+                       ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] HW Start Timer");
+                       timer_switch(true);
+               }
+       }
+       else
+       {
+               ged_gpu_power_off_notified = true;
+               g_bGPUClock = false;
+       }
+#endif
+#endif                                          
+}
+EXPORT_SYMBOL(ged_dvfs_gpu_clock_switch_notify);
+
+
+#define GED_TIMER_BACKUP_THRESHOLD 3000
+
+/*
+ *     The SODI implementation needs to cancel the timer physically,
+ *     but the timer status is logically unchanged.
+ */
+
+/*
+ * Entering the SODI state is trivial: just cancel the timer.
+ */
+void ged_sodi_start(void)
+{
+#ifdef ENABLE_COMMON_DVFS      
+       hrtimer_try_to_cancel(&g_HT_hwvsync_emu); 
+#endif          
+}
+
+
+/*
+ * Exiting the SODI state must account for the time that elapsed while the timer was cancelled.
+ */
+void ged_sodi_stop(void)
+{
+#ifdef ENABLE_COMMON_DVFS      
+       unsigned long long ns_cur_time;
+       unsigned long long ns_timer_remains;
+       if(g_timer_on)
+       {
+               ns_cur_time = ged_get_time();
+               ns_timer_remains = ns_cur_time - g_timer_on_ts - GED_DVFS_TIMER_TIMEOUT;
+               if( ns_timer_remains < GED_TIMER_BACKUP_THRESHOLD ) /* slept too long, do timer-based DVFS now */
+               {
+                       GED_NOTIFY_SW_SYNC* psNotify;
+                       psNotify = (GED_NOTIFY_SW_SYNC*)ged_alloc_atomic(sizeof(GED_NOTIFY_SW_SYNC));
+                       if (psNotify)
+                       {
+                               INIT_WORK(&psNotify->sWork, ged_notify_sw_sync_work_handle);
+                               psNotify->t = ns_cur_time;
+                               psNotify->phase = GED_DVFS_FALLBACK;
+                               psNotify->ul3DFenceDoneTime = 0;
+                               queue_work(g_psNotifyWorkQueue, &psNotify->sWork);                                                              
+                       }                                               
+                       hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(GED_DVFS_TIMER_TIMEOUT), HRTIMER_MODE_REL);
+               }
+               else if( ns_timer_remains > GED_DVFS_TIMER_TIMEOUT) 
+               {
+                       /* unknown status, just start the timer with the default timeout */
+                       hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(GED_DVFS_TIMER_TIMEOUT), HRTIMER_MODE_REL);
+               }
+               else /* keep counting down the timer with the real remaining time */
+               {
+                       hrtimer_start(&g_HT_hwvsync_emu, ns_to_ktime(ns_timer_remains), HRTIMER_MODE_REL);                                              
+               }                               
+       }
+#endif          
+}
+
+
+GED_ERROR ged_notify_sw_vsync_system_init(void)
+{
+       g_psNotifyWorkQueue = create_workqueue("ged_notify_sw_vsync");
+
+       if (g_psNotifyWorkQueue == NULL)
+       {
+               return GED_ERROR_OOM;
+       }
+       mutex_init(&gsVsyncStampLock);
+
+#ifdef ENABLE_COMMON_DVFS
+#ifdef ENABLE_TIMER_BACKUP
+       hrtimer_init(&g_HT_hwvsync_emu, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       g_HT_hwvsync_emu.function = ged_sw_vsync_check_cb;              
+
+       mtk_gpu_sodi_entry_fp = ged_sodi_start;
+       mtk_gpu_sodi_exit_fp = ged_sodi_stop;
+
+
+#endif
+#endif          
+
+       return GED_OK;
+}
+
+void ged_notify_sw_vsync_system_exit(void)
+{
+       if (g_psNotifyWorkQueue != NULL)
+       {
+               flush_workqueue(g_psNotifyWorkQueue);
+
+               destroy_workqueue(g_psNotifyWorkQueue);
+
+               g_psNotifyWorkQueue = NULL;
+       }
+#ifdef ENABLE_COMMON_DVFS                       
+       hrtimer_cancel( &g_HT_hwvsync_emu );
+#endif 
+       mutex_destroy(&gsVsyncStampLock);
+}
index daeffe6e58ef06ffb980387875b64c52c2b3c37b..76697d8356dd40f95aa9b15bfb3c91b517d9cf07 100644 (file)
@@ -13,7 +13,7 @@ GED_ERROR ged_profile_dvfs_init(void)
     mutex_init(&gsMutex);
 
 #if 0
-    ghLogBuf = ged_log_buf_alloc(320, 64, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
+    ghLogBuf = ged_log_buf_alloc(320, 64 * 320, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
 #endif
 
     return GED_OK;
@@ -25,9 +25,9 @@ GED_ERROR ged_profile_dvfs_enable(void)
 
     mutex_lock(&gsMutex);
 
-    if (NULL == ghLogBuf)
+    if (0 == ghLogBuf)
     {
-        ghLogBuf = ged_log_buf_alloc(320, 64, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
+        ghLogBuf = ged_log_buf_alloc(320, 64 * 320, GED_LOG_BUF_TYPE_QUEUEBUFFER_AUTO_INCREASE, NULL, "profile_dvfs");
     }
 
     ret = ghLogBuf ? GED_OK : GED_ERROR_FAIL;
@@ -41,10 +41,10 @@ void ged_profile_dvfs_disable(void)
 {
     mutex_lock(&gsMutex);
 
-    if (NULL != ghLogBuf)
+    if (0 != ghLogBuf)
     {
         ged_log_buf_free(ghLogBuf);
-        ghLogBuf = NULL;
+        ghLogBuf = 0;
     }
 
     mutex_unlock(&gsMutex);
@@ -87,7 +87,7 @@ void ged_profile_dvfs_record_freq_volt(unsigned int ui32Frequency, unsigned int
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,freq_volt,%u,%u", (unsigned long) t, nanosec_rem, ui32Frequency, ui32Voltage);
@@ -105,7 +105,7 @@ void ged_profile_dvfs_record_temp(int i32Temp)
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,temp,%d", (unsigned long) t, nanosec_rem, i32Temp);
@@ -124,7 +124,7 @@ void ged_profile_dvfs_record_thermal_limit(unsigned int ui32FreqLimit)
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,thermal_limit,%u", (unsigned long) t, nanosec_rem, ui32FreqLimit);
@@ -142,7 +142,7 @@ void ged_profile_dvfs_record_gpu_loading(unsigned int ui32GpuLoading)
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_load,%u", (unsigned long) t, nanosec_rem, ui32GpuLoading);
@@ -161,7 +161,7 @@ void ged_profile_dvfs_record_clock_on(void)
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_clock,1", (unsigned long) t, nanosec_rem);
@@ -180,7 +180,7 @@ void ged_profile_dvfs_record_clock_off(void)
        unsigned long long t;
        unsigned long nanosec_rem;
 
-       t = cpu_clock(smp_processor_id());
+       t = ged_get_time();
        nanosec_rem = do_div(t, 1000000000) / 1000;
 
         ged_log_buf_print(ghLogBuf, "%5lu.%06lu,gpu_clock,0", (unsigned long) t, nanosec_rem);
@@ -188,3 +188,44 @@ void ged_profile_dvfs_record_clock_off(void)
 
     mutex_unlock(&gsMutex);
 }
+
+void ged_profile_dvfs_record_SW_vsync(unsigned long ulTimeStamp, long lPhase, unsigned long ul3DFenceDoneTime)
+{
+    mutex_lock(&gsMutex);
+
+    if (ghLogBuf && gbAllowRecord)
+    {
+       /* copied and adapted from ./kernel/printk.c */
+       unsigned long long t;
+       unsigned long nanosec_rem;
+
+       t = ged_get_time();
+       nanosec_rem = do_div(t, 1000000000) / 1000;
+
+        ged_log_buf_print(ghLogBuf, "%5lu.%06lu,SW_vsync,%lu,%ld,%lu", (unsigned long) t, nanosec_rem, ulTimeStamp, lPhase, ul3DFenceDoneTime);
+    }
+
+    mutex_unlock(&gsMutex);
+}
+
+void ged_profile_dvfs_record_policy(
+    long lFreq, unsigned int ui32GpuLoading, long lPreT1, unsigned long ulPreFreq, long t0, unsigned long ulCurFreq, long t1, long lPhase)
+{
+    mutex_lock(&gsMutex);
+
+    if (ghLogBuf && gbAllowRecord)
+    {
+       /* copied and adapted from ./kernel/printk.c */
+       unsigned long long t;
+       unsigned long nanosec_rem;
+
+       t = ged_get_time();
+       nanosec_rem = do_div(t, 1000000000) / 1000;
+
+        ged_log_buf_print(ghLogBuf, "%5lu.%06lu,Freq=%ld,Load=%u,PreT1=%ld,PreF=%lu,t0=%ld,CurF=%lu,t1=%ld,phase=%ld", (unsigned long) t, nanosec_rem, lFreq, ui32GpuLoading, lPreT1, ulPreFreq, t0, ulCurFreq, t1, lPhase);
+    }
+
+    mutex_unlock(&gsMutex);
+
+}
+
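
Each record helper above converts a raw nanosecond timestamp into a "seconds.microseconds" pair with the same idiom: a plain 64-bit division is not available on 32-bit ARM kernels, so the split goes through do_div(). A short sketch of just that idiom (the function name and values are hypothetical):

#include <linux/kernel.h>
#include <asm/div64.h>

static void demo_print_ts(unsigned long long t_ns)
{
	unsigned long nanosec_rem;

	/* do_div() writes the quotient back into t_ns and returns the remainder */
	nanosec_rem = do_div(t_ns, 1000000000);

	/* e.g. t_ns = 12 (seconds), nanosec_rem / 1000 = 345678 (microseconds) */
	printk("%5lu.%06lu\n", (unsigned long)t_ns, nanosec_rem / 1000);
}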
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_thread.c b/drivers/misc/mediatek/gpu/ged/src/ged_thread.c
new file mode 100644 (file)
index 0000000..6140394
--- /dev/null
@@ -0,0 +1,71 @@
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include "ged_base.h"
+#include "ged_thread.h"
+
+typedef struct GED_THREAD_DATA_TAG
+{
+       struct task_struct* psThread;
+       GED_THREAD_FUNC     pFunc;
+       void*               pvData;
+} GED_THREAD_DATA;
+
+static int ged_thread_run(void *pvData)
+{
+       GED_THREAD_DATA* psThreadData = (GED_THREAD_DATA*)pvData;
+       if (psThreadData == NULL)
+       {
+               return 0;
+       }
+
+       psThreadData->pFunc(psThreadData->pvData);
+
+       while (!kthread_should_stop())
+       {
+               schedule();
+       }
+
+       return 0;
+}
+
+GED_ERROR ged_thread_create(GED_THREAD_HANDLE *phThread, const char* szThreadName, GED_THREAD_FUNC pFunc, void* pvData)
+{
+       GED_THREAD_DATA* psThreadData;
+       if (phThread == NULL)
+       {
+               return GED_ERROR_INVALID_PARAMS;
+       }
+
+       psThreadData = (GED_THREAD_DATA*)ged_alloc(sizeof(GED_THREAD_DATA));
+       if (psThreadData == NULL)
+       {
+               return GED_ERROR_OOM;
+       }
+
+       psThreadData->pFunc = pFunc;
+       psThreadData->pvData = pvData;
+       psThreadData->psThread = kthread_run(ged_thread_run, psThreadData, szThreadName);
+
+       if (IS_ERR(psThreadData->psThread))
+       {
+               ged_free(psThreadData, sizeof(GED_THREAD_DATA));
+               return GED_ERROR_OOM;
+       }
+
+       *phThread = (GED_THREAD_HANDLE)psThreadData;
+
+       return GED_OK;
+}
+
+GED_ERROR ged_thread_destroy(GED_THREAD_HANDLE hThread)
+{
+       GED_THREAD_DATA* psThreadData = (GED_THREAD_DATA*)hThread;
+       if (psThreadData == NULL)
+       {
+               return GED_ERROR_INVALID_PARAMS;
+       }
+
+       kthread_stop(psThreadData->psThread);
+       ged_free(psThreadData, sizeof(GED_THREAD_DATA));
+       return GED_OK;
+}
diff --git a/drivers/misc/mediatek/gpu/ged/src/ged_thread.h b/drivers/misc/mediatek/gpu/ged/src/ged_thread.h
new file mode 100644 (file)
index 0000000..5ec5eab
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __GED_THREAD_H__
+#define __GED_THREAD_H__
+
+#include "ged_type.h"
+
+typedef void* GED_THREAD_HANDLE;
+
+typedef void (*GED_THREAD_FUNC)(void*);
+
+GED_ERROR ged_thread_create(GED_THREAD_HANDLE *phThread, const char* szThreadName, GED_THREAD_FUNC pFunc, void* pvData);
+
+GED_ERROR ged_thread_destroy(GED_THREAD_HANDLE hThread);
+
+#endif
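
Taken together, the two files above form a small opaque-handle thread API: ged_thread_create() spawns a kthread that runs the given function once and then idles until ged_thread_destroy() stops it. A hypothetical caller (the worker function and names below are illustrative, not from this commit) would use it like this:

#include "ged_thread.h"

static GED_THREAD_HANDLE ghDemoThread;

/* runs exactly once on the new kthread; pvData is the caller's context */
static void demo_worker(void *pvData)
{
	(void)pvData;
}

static GED_ERROR demo_start(void)
{
	return ged_thread_create(&ghDemoThread, "ged_demo", demo_worker, NULL);
}

static void demo_stop(void)
{
	/* stops the kthread and frees the handle's backing allocation */
	ged_thread_destroy(ghDemoThread);
}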
old mode 100755 (executable)
new mode 100644 (file)
index 4959cfe5d8144b47cae547dd35f3250cfd3e4a41..e7ecb1e546a17fa391e8a9d8364dd042bc36c924 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/mtk_gpu_utility.h>
+#include <mt-plat/mtk_gpu_utility.h>
 
 unsigned int (*mtk_get_gpu_memory_usage_fp)(void) = NULL;
 EXPORT_SYMBOL(mtk_get_gpu_memory_usage_fp);
@@ -87,6 +87,20 @@ bool mtk_get_gpu_idle(unsigned int* pIdle)
 }
 EXPORT_SYMBOL(mtk_get_gpu_idle);
 
+unsigned int (*mtk_get_gpu_freq_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_gpu_freq_fp);
+
+bool mtk_get_gpu_freq(unsigned int *pFreq)
+{
+       if (NULL != mtk_get_gpu_freq_fp) {
+               if (pFreq) {
+                       *pFreq = mtk_get_gpu_freq_fp();
+                       return true;
+               }
+       }
+       return false;
+}
+EXPORT_SYMBOL(mtk_get_gpu_freq);
 
 unsigned int (*mtk_get_gpu_GP_loading_fp)(void) = NULL;
 EXPORT_SYMBOL(mtk_get_gpu_GP_loading_fp);
@@ -182,6 +196,20 @@ bool mtk_set_bottom_gpu_freq(unsigned int ui32FreqLevel)
 }
 EXPORT_SYMBOL(mtk_set_bottom_gpu_freq);
 
+//-----------------------------------------------------------------------------
+unsigned int (*mtk_get_bottom_gpu_freq_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_bottom_gpu_freq_fp);
+
+bool mtk_get_bottom_gpu_freq(unsigned int *pui32FreqLevel)
+{
+    if ((NULL != mtk_get_bottom_gpu_freq_fp) && (pui32FreqLevel))
+    {
+        *pui32FreqLevel = mtk_get_bottom_gpu_freq_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_bottom_gpu_freq);
 //-----------------------------------------------------------------------------
 unsigned int (*mtk_custom_get_gpu_freq_level_count_fp)(void) = NULL;
 EXPORT_SYMBOL(mtk_custom_get_gpu_freq_level_count_fp);
@@ -232,3 +260,201 @@ bool mtk_custom_upbound_gpu_freq(unsigned int ui32FreqLevel)
 }
 EXPORT_SYMBOL(mtk_custom_upbound_gpu_freq);
 
+//-----------------------------------------------------------------------------
+
+unsigned int (*mtk_get_custom_boost_gpu_freq_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_custom_boost_gpu_freq_fp);
+
+bool mtk_get_custom_boost_gpu_freq(unsigned int *pui32FreqLevel)
+{
+    if ((NULL != mtk_get_custom_boost_gpu_freq_fp) && (NULL != pui32FreqLevel))
+    {
+        *pui32FreqLevel = mtk_get_custom_boost_gpu_freq_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_custom_boost_gpu_freq);
+
+//-----------------------------------------------------------------------------
+
+unsigned int (*mtk_get_custom_upbound_gpu_freq_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_custom_upbound_gpu_freq_fp);
+
+bool mtk_get_custom_upbound_gpu_freq(unsigned int *pui32FreqLevel)
+{
+    if ((NULL != mtk_get_custom_upbound_gpu_freq_fp) && (NULL != pui32FreqLevel))
+    {
+        *pui32FreqLevel = mtk_get_custom_upbound_gpu_freq_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_custom_upbound_gpu_freq);
+
+//-----------------------------------------------------------------------------
+void (*mtk_do_gpu_dvfs_fp)(unsigned long t, long phase, unsigned long ul3DFenceDoneTime) = NULL;
+EXPORT_SYMBOL(mtk_do_gpu_dvfs_fp);
+
+bool mtk_do_gpu_dvfs(unsigned long t, long phase, unsigned long ul3DFenceDoneTime)
+{
+    if (NULL != mtk_do_gpu_dvfs_fp)
+    {
+        mtk_do_gpu_dvfs_fp(t, phase, ul3DFenceDoneTime);
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_do_gpu_dvfs);
+
+//-----------------------------------------------------------------------------
+
+void  (*mtk_gpu_sodi_entry_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_gpu_sodi_entry_fp);
+
+bool mtk_gpu_sodi_entry(void)
+{
+    if (NULL != mtk_gpu_sodi_entry_fp)
+    {
+        mtk_gpu_sodi_entry_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_gpu_sodi_entry);
+
+
+
+//-----------------------------------------------------------------------------
+
+void  (*mtk_gpu_sodi_exit_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_gpu_sodi_exit_fp);
+
+bool mtk_gpu_sodi_exit(void)
+{
+    if (NULL != mtk_gpu_sodi_exit_fp)
+    {
+        mtk_gpu_sodi_exit_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_gpu_sodi_exit);
+
+
+//-----------------------------------------------------------------------------
+
+unsigned int (*mtk_get_sw_vsync_phase_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_sw_vsync_phase_fp);
+
+bool mtk_get_sw_vsync_phase(long* plPhase)
+{
+    if (NULL != mtk_get_sw_vsync_phase_fp)
+    {
+        if (plPhase)
+        {
+            *plPhase = mtk_get_sw_vsync_phase_fp();
+            return true;
+        }
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_sw_vsync_phase);
+
+//-----------------------------------------------------------------------------
+
+unsigned int (*mtk_get_sw_vsync_time_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_sw_vsync_time_fp);
+
+bool mtk_get_sw_vsync_time(unsigned long* pulTime)
+{
+    if (NULL != mtk_get_sw_vsync_time_fp)
+    {
+        if (pulTime)
+        {
+            *pulTime = mtk_get_sw_vsync_time_fp();
+            return true;
+        }
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_sw_vsync_time);
+
+//-----------------------------------------------------------------------------
+
+unsigned int (*mtk_get_gpu_fence_done_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_gpu_fence_done_fp);
+
+bool mtk_get_gpu_fence_done(unsigned long* pulTime)
+{
+    if (NULL != mtk_get_gpu_fence_done_fp)
+    {
+        if (pulTime)
+        {
+            *pulTime = mtk_get_gpu_fence_done_fp();
+            return true;
+        }
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_get_gpu_fence_done);
+
+//-----------------------------------------------------------------------------
+void (*mtk_gpu_dvfs_set_mode_fp)(int eMode) = NULL;
+EXPORT_SYMBOL(mtk_gpu_dvfs_set_mode_fp);
+
+bool mtk_gpu_dvfs_set_mode(int eMode)
+{
+    if (NULL != mtk_gpu_dvfs_set_mode_fp)
+    {
+        mtk_gpu_dvfs_set_mode_fp(eMode);
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_gpu_dvfs_set_mode);
+
+//-----------------------------------------------------------------------------
+void (*mtk_dump_gpu_memory_usage_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_dump_gpu_memory_usage_fp);
+
+bool mtk_dump_gpu_memory_usage(void)
+{
+    if (NULL != mtk_dump_gpu_memory_usage_fp)
+    {
+        mtk_dump_gpu_memory_usage_fp();
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_dump_gpu_memory_usage);
+
+
+//-----------------------------------------------------------------------------
+int (*mtk_get_gpu_power_state_fp)(void) = NULL;
+EXPORT_SYMBOL(mtk_get_gpu_power_state_fp);
+
+int mtk_get_gpu_power_state(void)
+{
+    if (NULL != mtk_get_gpu_power_state_fp)
+    {
+        return mtk_get_gpu_power_state_fp();
+    }
+    return -1;
+}
+EXPORT_SYMBOL(mtk_get_gpu_power_state);
+
+//-----------------------------------------------------------------------------
+void (*mtk_gpu_dvfs_clock_switch_fp)(bool bSwitch) = NULL;
+EXPORT_SYMBOL(mtk_gpu_dvfs_clock_switch_fp);
+
+bool mtk_gpu_dvfs_clock_switch(bool bSwitch)
+{
+    if (NULL != mtk_gpu_dvfs_clock_switch_fp)
+    {
+        mtk_gpu_dvfs_clock_switch_fp(bSwitch);
+        return true;
+    }
+    return false;
+}
+EXPORT_SYMBOL(mtk_gpu_dvfs_clock_switch);
diff --git a/drivers/misc/mediatek/gpu/hal/mtk_gpu_utility.h b/drivers/misc/mediatek/gpu/hal/mtk_gpu_utility.h
new file mode 100644 (file)
index 0000000..95b4fb4
--- /dev/null
@@ -0,0 +1,43 @@
+#ifndef __MTK_GPU_UTILITY_H__
+#define __MTK_GPU_UTILITY_H__
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* returning false indicates no implementation */
+
+/* unit: x bytes */
+bool mtk_get_gpu_memory_usage(unsigned int *pMemUsage);
+bool mtk_get_gpu_page_cache(unsigned int *pPageCache);
+
+/* unit: 0~100 % */
+bool mtk_get_gpu_loading(unsigned int *pLoading);
+bool mtk_get_gpu_block(unsigned int *pBlock);
+bool mtk_get_gpu_idle(unsigned int *pIdle);
+bool mtk_get_gpu_freq(unsigned int *pFreq);
+
+bool mtk_get_gpu_GP_loading(unsigned int *pLoading);
+bool mtk_get_gpu_PP_loading(unsigned int *pLoading);
+bool mtk_get_gpu_power_loading(unsigned int *pLoading);
+
+bool mtk_enable_gpu_dvfs_timer(bool bEnable);
+bool mtk_boost_gpu_freq(void);
+bool mtk_set_bottom_gpu_freq(unsigned int ui32FreqLevel);
+
+/* ui32FreqLevel: 0=>lowest freq, count-1=>highest freq */
+bool mtk_custom_get_gpu_freq_level_count(unsigned int *pui32FreqLevelCount);
+bool mtk_custom_boost_gpu_freq(unsigned int ui32FreqLevel);
+bool mtk_custom_upbound_gpu_freq(unsigned int ui32FreqLevel);
+bool mtk_get_custom_boost_gpu_freq(unsigned int *pui32FreqLevel);
+bool mtk_get_custom_upbound_gpu_freq(unsigned int *pui32FreqLevel);
+
+bool mtk_dump_gpu_memory_usage(void);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
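
Every query in this header goes through the same late-binding hook pattern seen in mtk_gpu_utility.c: each mtk_* wrapper returns false until a GPU driver module assigns the matching *_fp pointer. A hedged sketch of both sides follows; the provider function and its return value are made up for illustration.

#include <linux/kernel.h>
#include <mt-plat/mtk_gpu_utility.h>

extern unsigned int (*mtk_get_gpu_freq_fp)(void);

/* provider side: a hypothetical DVFS module publishes its current frequency */
static unsigned int demo_read_gpu_freq(void)
{
	return 455000; /* made-up value */
}

static void demo_register_hook(void)
{
	mtk_get_gpu_freq_fp = demo_read_gpu_freq;
}

/* consumer side: a false return means no implementation is registered yet */
static void demo_query_freq(void)
{
	unsigned int freq;

	if (mtk_get_gpu_freq(&freq))
		pr_info("GPU freq: %u\n", freq);
	else
		pr_info("GPU freq hook not registered\n");
}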
old mode 100755 (executable)
new mode 100644 (file)
index 38f7c3e..e367a88
@@ -1 +1,14 @@
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
 obj-y += mali/
old mode 100755 (executable)
new mode 100644 (file)
index 38f7c3e..e367a88
@@ -1 +1,14 @@
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
 obj-y += mali/
old mode 100755 (executable)
new mode 100644 (file)
index 7f6abae..64d52cd
@@ -25,31 +25,20 @@ MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
 MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
 MALI_UPPER_HALF_SCHEDULING ?= 1
 MALI_ENABLE_CPU_CYCLES ?= 0
-#ifeq ($(FLAG_MTK_BUILD_SYS),1)
-#DRIVER_DIR=$(MTK_PATH_PLATFORM)/drivers/gpu/mali/mali
-#else
-# Get path to driver source from Linux build system
+
 DRIVER_DIR=$(src)
-#endif
+
 
 # For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
 # The ARM proprietary product will only include the license/proprietary directory
 # The GPL product will only include the license/gpl directory
-#ifeq ($(wildcard $(DRIVER_DIR)/linux/license/gpl/*),)
-#    ccflags-y += -I$(DRIVER_DIR)/linux/license/proprietary
-#    ifeq ($(CONFIG_MALI400_PROFILING),y)
-#        $(error Profiling is incompatible with non-GPL license)
-#    endif
-#    ifeq ($(CONFIG_PM_RUNTIME),y)
-#       $(error Runtime PM is incompatible with non-GPL license)
-#    endif
-#    ifeq ($(CONFIG_DMA_SHARED_BUFFER),y)
-#        $(error DMA-BUF is incompatible with non-GPL license)
-#    endif
-#    $(error Linux Device integration is incompatible with non-GPL license)
-#else
-     ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
-#endif
+ccflags-y += -I$(DRIVER_DIR)/linux/license/gpl
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+    ifeq ($(USING_DVFS), 1)
+        $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose which one you need)
+    endif
+endif
 
 mali-y += \
        linux/mali_osk_atomics.o \
@@ -70,6 +59,12 @@ mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
 mali-y += linux/mali_memory_external.o
 mali-y += linux/mali_memory_block_alloc.o
 
+mali-y += \
+       linux/mali_memory_manager.o \
+       linux/mali_memory_virtual.o \
+       linux/mali_memory_util.o \
+       linux/mali_memory_cow.o
+
 mali-y += \
        linux/mali_ukk_mem.o \
        linux/mali_ukk_gp.o \
@@ -82,7 +77,6 @@ mali-y += \
 mali-y += \
        common/mali_kernel_core.o \
        linux/mali_kernel_linux.o \
-       common/mali_kernel_descriptor_mapping.o \
        common/mali_session.o \
        linux/mali_device_pause_resume.o \
        common/mali_kernel_vsync.o \
@@ -98,8 +92,7 @@ mali-y += \
        common/mali_gp_job.o \
        common/mali_soft_job.o \
        common/mali_scheduler.o \
-       common/mali_gp_scheduler.o \
-       common/mali_pp_scheduler.o \
+       common/mali_executor.o \
        common/mali_group.o \
        common/mali_dlbu.o \
        common/mali_broadcast.o \
@@ -107,8 +100,8 @@ mali-y += \
        common/mali_pmu.o \
        common/mali_user_settings_db.o \
        common/mali_kernel_utilization.o \
+       common/mali_control_timer.o \
        common/mali_l2_cache.o \
-       common/mali_dma.o \
        common/mali_timeline.o \
        common/mali_timeline_fence_wait.o \
        common/mali_timeline_sync_fence.o \
@@ -121,6 +114,10 @@ mali-y += \
 mali-y += platform/platform.o
 mali-y += platform/platform_pmm.o
 #mali-$(CONFIG_MTK_MET) += platform/platform_met.o
+ifdef CONFIG_OF
+ccflags-y += -DCONFIG_MALI_DT
+endif
+ccflags-y += -DCONFIG_MALI450
 
 ifneq ($(MALI_PLATFORM_FILES),)
        mali-y += $(MALI_PLATFORM_FILES:.c=.o)
@@ -134,10 +131,11 @@ ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(DRIVER_DIR)/timestamp-$(TIME
 
 mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
 mali-$(CONFIG_SYNC) += linux/mali_sync.o
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
 
 mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
 
-mali-$(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) += common/mali_power_performance_policy.o
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
 
 # Tell the Linux build system from which .o file to create the kernel module
 obj-$(CONFIG_MALI400) := mali.o
@@ -145,9 +143,6 @@ obj-$(CONFIG_MALI400) := mali.o
 ccflags-y += $(EXTRA_DEFINES)
 
 # Set up our defines, which will be passed to gcc
-ccflags-y += -DPROFILING_SKIP_PP_JOBS=$(PROFILING_SKIP_PP_JOBS)
-ccflags-y += -DPROFILING_SKIP_PP_AND_GP_JOBS=$(PROFILING_SKIP_PP_AND_GP_JOBS)
-
 ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
 ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
 ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
@@ -164,7 +159,7 @@ ccflags-$(CONFIG_MALI400_UMP) += -I$(DRIVER_DIR)/../../ump/include/ump
 ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
 
 # Use our defines when compiling
-ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform
+ccflags-y += -I$(DRIVER_DIR) -I$(DRIVER_DIR)/include -I$(DRIVER_DIR)/common -I$(DRIVER_DIR)/linux -I$(DRIVER_DIR)/platform -Wno-date-time
 
 # Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
 MALI_RELEASE_NAME=$(shell cat $(DRIVER_DIR)/.version 2> /dev/null)
@@ -198,11 +193,8 @@ endif
 
 ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
 
-#Add staging include for android ..
-ccflags-y += -I$(srctree)/drivers/staging/android
-
 VERSION_STRINGS :=
-VERSION_STRINGS += API_VERSION=$(shell grep "\#define _MALI_API_VERSION" $(srctree)/$(DRIVER_DIR)/include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
 VERSION_STRINGS += REPO_URL=$(REPO_URL)
 VERSION_STRINGS += REVISION=$(DRIVER_REV)
 VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
@@ -221,11 +213,9 @@ VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
 VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
 VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
 VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
-VERSION_STRINGS += USING_POWER_PERFORMANCE_POLICY=$(CONFIG_POWER_PERFORMANCE_POLICY)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
 VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
 
-
-
 #MTK port custom Kbuild
 #To Add 1.ccflags-y 2.SRC
 include $(DRIVER_DIR)/Kbuild-mtk-custom-src
old mode 100755 (executable)
new mode 100644 (file)
index 1e4f3db..b1180fb
@@ -1,10 +1,8 @@
 #clouds add
 VER:=eng
-ifneq ($(strip $(TARGET_BUILD_VARIANT)),)
-ifneq ($(strip $(TARGET_BUILD_VARIANT)),eng)
+ifndef CONFIG_MT_ENG_BUILD
 VER:=user
 endif
-endif
 #----------------------------------------------------------
 #[Section:Manual modify]
 
@@ -13,9 +11,7 @@ USING_GPU_UTILIZATION := 1
 
 #----------------------------------------------------------
 #[Section:Should add to alps project config (TODO)]
-#Not yet move to "alps/mediatek/config/mt6582/autoconfig/kconfig/platform"
 #due to the mechanism
-ccflags-y += -DCONFIG_MALI450
 
 CONFIG_MALI400 := y
 ifeq ($(VER),eng)
@@ -30,11 +26,11 @@ CONFIG_DMA_SHARED_BUFFER := y
 #CONFIG_MALI400_UMP := y
 #CONFIG_MALI400_PROFILING := y
 #CONFIG_MALI400_INTERNAL_PROFILING := y
+CONFIG_MALI450 := y
 
 CONFIG_SYNC := y
 ccflags-y += -DCONFIG_SYNC
 
-ccflags-$(CONFIG_MALI400_PROFILING) += -DCONFIG_MALI400_PROFILING
 
 #----------------------------------------------------------
 #Debug version (BUILD := debug not eaten by kbuild)
old mode 100755 (executable)
new mode 100644 (file)
index 85b0033..2e421b7
@@ -8,19 +8,12 @@ ccflags-y += -DDEBUG
 ccflags-y += -I$(DRIVER_DIR)/../ump_include/ump
 
 #include mtk kernel header files
-#ccflags-y += \
-#    -I$(MTK_PATH_PLATFORM)/core/include/mach \
-#    -I$(MTK_PATH_SOURCE)/include/linux
 ccflags-y += \
-       -I$(srctree)/arch/arm/mach-mt8127/include/mach \
     -I$(srctree)/include/linux \
-    -I$(srctree)/arch/arm/mach-mt8127/gpu/mali/mali/linux/license/gpl
-
 
 #Add MTK custom source code
 ccflags-y += -I$(DRIVER_DIR)/../../mediatek
 mali-y    += ../../mediatek/mtk_mali_kernel.o
-#mali-y    += ../../mediatek/mtk_mali_trace.o
 
 ccflags-y += -DMALI_FAKE_PLATFORM_DEVICE
 
@@ -28,23 +21,11 @@ ccflags-y += -DMALI_FAKE_PLATFORM_DEVICE
 #ccflags-y += -finstrument-functions
 #mali-y += platform/$(TARGET_PLATFORM)/mtk_ptrace.c
 
+mali-y += platform/arm_core_scaling.o
 mali-y += platform/platform.o
 mali-y += platform/platform_pmm.o
 
-
 #MTK_DEBUG
-ccflags-y += -I$(DRIVER_DIR)/mtk_common
-
-#Temp remove this for Kernel version update
-#mali-y += mtk_common/mtk_mem_record.o
-#mali-$(MTK_DEBUG) += mtk_common/mtk_debug.o
-#mali-$(MTK_DEBUG_PROC_PRINT) += mtk_common/mtk_pp.o
-
-#Use this to set PP num and pll 
-#ifdef MTK_NR_MALI_PP
-#ccflags-y += -DMTK_NR_MALI_PP=$(MTK_NR_MALI_PP)
-#endif
 
-#ifeq ($(MTK_MALI_UNIV), yes)
-#ccflags-y += -DMTK_MALI_UNIV
-#endif
+#Add include path for kernel 3.10
+ccflags-y += -I$(srctree)/drivers/staging/android
old mode 100755 (executable)
new mode 100644 (file)
index 477e6b6..f199123
@@ -1,6 +1,6 @@
 config MALI400
        tristate "Mali-300/400/450 support"
-       depends on ARM
+       depends on ARM || ARM64
        select DMA_SHARED_BUFFER
        ---help---
          This enables support for the ARM Mali-300, Mali-400, and Mali-450
@@ -15,6 +15,12 @@ config MALI450
        ---help---
          This enables support for Mali-450 specific features.
 
+config MALI470
+       bool "Enable Mali-470 support"
+       depends on MALI400
+       ---help---
+         This enables support for Mali-470 specific features.
+
 config MALI400_DEBUG
        bool "Enable debug in Mali driver"
        depends on MALI400
@@ -42,12 +48,12 @@ config MALI400_UMP
        ---help---
          This enables support for the UMP memory sharing API in the Mali driver.
 
-config MALI400_POWER_PERFORMANCE_POLICY
-       bool "Enable Mali power performance policy"
-       depends on ARM
-       default n
+config MALI_DVFS
+       bool "Enable Mali dynamic frequency change"
+       depends on MALI400
+       default y
        ---help---
-         This enables support for dynamic performance scaling of Mali with the goal of lowering power consumption.
+         This enables support for dynamically changing the frequency of Mali with the goal of lowering power consumption.
 
 config MALI_DMA_BUF_MAP_ON_ATTACH
        bool "Map dma-buf attachments on attach"
@@ -79,3 +85,22 @@ config MALI_PMU_PARALLEL_POWER_UP
          powering up domains one by one, with a slight delay in between. Powering on all power
          domains at the same time may cause peak currents higher than what some systems can handle.
          These systems must not enable this option.
+
+config MALI_DT
+       bool "Using device tree to initialize module"
+       depends on MALI400 && OF
+       default n
+       ---help---
+         This enables the Mali driver to use the device tree to get platform resources
+         and disables the old config method. The Mali driver can run on platforms where
+         the device tree is enabled in the kernel and the corresponding hardware
+         description is properly implemented in the device DTS file.
+
+config MALI_QUIET
+       bool "Make Mali driver very quiet"
+       depends on MALI400 && !MALI400_DEBUG
+       default n
+       ---help---
+         This forces the Mali driver to never print any messages.
+
+         If unsure, say N.
old mode 100755 (executable)
new mode 100644 (file)
index 022ca53..c48cf62
@@ -1,7 +1,7 @@
 #
 # This confidential and proprietary software may be used only as
 # authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2007-2013 ARM Limited
+# (C) COPYRIGHT 2007-2015 ARM Limited
 # ALL RIGHTS RESERVED
 # The entire notice above must be reproduced on all authorised
 # copies and copies may only be made to the extent permitted
 USE_UMPV2=0
 USING_PROFILING ?= 1
 USING_INTERNAL_PROFILING ?= 0
-USING_POWER_PERFORMANCE_POLICY ?= 0
+USING_DVFS ?= 0
 MALI_HEATMAPS_ENABLED ?= 0
 MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
 MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
 
 # The Makefile sets up "arch" based on the CONFIG, creates the version info
 # string and the __malidrv_build_info.c file, and then call the Linux build
@@ -63,6 +64,11 @@ ifeq ($(KDIR),)
 $(error No KDIR found for platform $(TARGET_PLATFORM))
 endif
 
+ifeq ($(USING_GPU_UTILIZATION), 1)
+    ifeq ($(USING_DVFS), 1)
+        $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose which one you need)
+    endif
+endif
 
 ifeq ($(USING_UMP),1)
 export CONFIG_MALI400_UMP=y
@@ -96,9 +102,11 @@ endif
 # Set up build config
 export CONFIG_MALI400=m
 export CONFIG_MALI450=y
+export CONFIG_MALI470=y
 
 export EXTRA_DEFINES += -DCONFIG_MALI400=1
 export EXTRA_DEFINES += -DCONFIG_MALI450=1
+export EXTRA_DEFINES += -DCONFIG_MALI470=1
 
 ifneq ($(MALI_PLATFORM),)
 export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
@@ -132,9 +140,9 @@ export CONFIG_MALI_SHARED_INTERRUPTS=y
 export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
 endif
 
-ifeq ($(USING_POWER_PERFORMANCE_POLICY),1)
-export CONFIG_MALI400_POWER_PERFORMANCE_POLICY=y
-export EXTRA_DEFINES += -DCONFIG_MALI400_POWER_PERFORMANCE_POLICY
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
 endif
 
 ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
@@ -142,8 +150,26 @@ export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
 export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
 endif
 
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
 ifneq ($(BUILD),release)
+# Debug
 export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
 endif
 
 all: $(UMP_SYMVERS_FILE)
index c8f4887587addb9a094bb92d0e38b84f7c40fcaf..b08b7af51053bfa321aba2820780bae6e8e11e83 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -12,9 +12,9 @@
 #include "mali_kernel_common.h"
 #include "mali_osk.h"
 
-static const int bcast_unit_reg_size = 0x1000;
-static const int bcast_unit_addr_broadcast_mask = 0x0;
-static const int bcast_unit_addr_irq_override_mask = 0x4;
+#define MALI_BROADCAST_REGISTER_SIZE      0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK    0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK    0x4
 
 struct mali_bcast_unit {
        struct mali_hw_core hw_core;
@@ -26,21 +26,23 @@ struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resou
        struct mali_bcast_unit *bcast_unit = NULL;
 
        MALI_DEBUG_ASSERT_POINTER(resource);
-       MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+       MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+                            resource->description));
 
        bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
        if (NULL == bcast_unit) {
-               MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+               MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
                return NULL;
        }
 
-       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+                       resource, MALI_BROADCAST_REGISTER_SIZE)) {
                bcast_unit->current_mask = 0;
                mali_bcast_reset(bcast_unit);
 
                return bcast_unit;
        } else {
-               MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+               MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n"));
        }
 
        _mali_osk_free(bcast_unit);
@@ -51,12 +53,16 @@ struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resou
 void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
 {
        MALI_DEBUG_ASSERT_POINTER(bcast_unit);
-
        mali_hw_core_delete(&bcast_unit->hw_core);
        _mali_osk_free(bcast_unit);
 }
 
-void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+/* Call this function to add the @group's id into the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+                         struct mali_group *group)
 {
        u32 bcast_id;
        u32 broadcast_mask;
@@ -75,7 +81,12 @@ void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group
        bcast_unit->current_mask = broadcast_mask;
 }
 
-void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+/* Call this function to remove the @group's id from the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+                            struct mali_group *group)
 {
        u32 bcast_id;
        u32 broadcast_mask;
@@ -97,28 +108,35 @@ void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
 {
        MALI_DEBUG_ASSERT_POINTER(bcast_unit);
 
+       MALI_DEBUG_PRINT(4,
+                        ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+                         bcast_unit->current_mask,
+                         bcast_unit->current_mask & 0xFF));
+
        /* set broadcast mask */
        mali_hw_core_register_write(&bcast_unit->hw_core,
-                                   bcast_unit_addr_broadcast_mask,
-                                   bcast_unit->current_mask);
+                                   MALI_BROADCAST_REG_BROADCAST_MASK,
+                                   bcast_unit->current_mask);
 
        /* set IRQ override mask */
        mali_hw_core_register_write(&bcast_unit->hw_core,
-                                   bcast_unit_addr_irq_override_mask,
-                                   bcast_unit->current_mask & 0xFF);
+                                   MALI_BROADCAST_REG_INTERRUPT_MASK,
+                                   bcast_unit->current_mask & 0xFF);
 }
 
 void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
 {
        MALI_DEBUG_ASSERT_POINTER(bcast_unit);
 
+       MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
        /* set broadcast mask */
        mali_hw_core_register_write(&bcast_unit->hw_core,
-                                   bcast_unit_addr_broadcast_mask,
-                                   0x0);
+                                   MALI_BROADCAST_REG_BROADCAST_MASK,
+                                   0x0);
 
        /* set IRQ override mask */
        mali_hw_core_register_write(&bcast_unit->hw_core,
-                                   bcast_unit_addr_irq_override_mask,
-                                   0x0);
+                                   MALI_BROADCAST_REG_INTERRUPT_MASK,
+                                   0x0);
 }
index 472d9bc05407ecbe272ff30a66d1f0c885f23e60..e4af168893de1cb98a086943bdcc9ca5443f3656 100644 (file)
@@ -1,13 +1,16 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
 
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
 /*
  *  Interface for the broadcast unit on Mali-450.
  *
@@ -50,3 +53,5 @@ MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
 {
        mali_bcast_reset(bcast_unit);
 }
+
+#endif /* __MALI_BROADCAST_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.c
new file mode 100644 (file)
index 0000000..46e088f
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2010-2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+static u64 period_start_time = 0;
+
+static _mali_osk_timer_t *mali_control_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 mali_control_timeout = 1000;
+
+void mali_control_timer_add(u32 timeout)
+{
+       _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+static void mali_control_timer_callback(void *arg)
+{
+       if (mali_utilization_enabled()) {
+               struct mali_gpu_utilization_data *util_data = NULL;
+               u64 time_period = 0;
+               mali_bool need_add_timer = MALI_TRUE;
+
+               /* Calculate gpu utilization */
+               util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer);
+
+               if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+                       mali_dvfs_policy_realize(util_data, time_period);
+#else
+                       mali_utilization_platform_realize(util_data);
+#endif
+
+                       if (MALI_TRUE == need_add_timer) {
+                               mali_control_timer_add(mali_control_timeout);
+                       }
+               }
+       }
+}
+
+/* Init a timer (for now it is used for GPU utilization and dvfs) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+       _mali_osk_device_data data;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Use device specific settings (if defined) */
+               if (0 != data.control_interval) {
+                       mali_control_timeout = data.control_interval;
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+               }
+       }
+
+       mali_control_timer = _mali_osk_timer_init();
+       if (NULL == mali_control_timer) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_control_timer_term(void)
+{
+       if (NULL != mali_control_timer) {
+               _mali_osk_timer_del(mali_control_timer);
+               timer_running = MALI_FALSE;
+               _mali_osk_timer_term(mali_control_timer);
+               mali_control_timer = NULL;
+       }
+}
+
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+       mali_utilization_data_assert_locked();
+
+       if (timer_running != MALI_TRUE) {
+               timer_running = MALI_TRUE;
+
+               period_start_time = time_now;
+
+               mali_utilization_reset();
+
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
+}
+
+void mali_control_timer_pause(void)
+{
+       mali_utilization_data_assert_locked();
+       if (timer_running == MALI_TRUE) {
+               timer_running = MALI_FALSE;
+       }
+}
+
+void mali_control_timer_suspend(mali_bool suspend)
+{
+       mali_utilization_data_lock();
+
+       if (timer_running == MALI_TRUE) {
+               timer_running = MALI_FALSE;
+
+               mali_utilization_data_unlock();
+
+               if (suspend == MALI_TRUE) {
+                       _mali_osk_timer_del(mali_control_timer);
+                       mali_utilization_reset();
+               }
+       } else {
+               mali_utilization_data_unlock();
+       }
+}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_control_timer.h
new file mode 100644 (file)
index 0000000..6b8e5d2
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2010-2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+void mali_control_timer_term(void);
+
+mali_bool mali_control_timer_resume(u64 time_now);
+
+void mali_control_timer_suspend(mali_bool suspend);
+void mali_control_timer_pause(void);
+
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
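
For orientation, the control-timer API above is driven by the driver core: the callback in mali_control_timer.c samples GPU utilization, feeds either the DVFS policy or the platform hook, and conditionally re-arms. The call order below is a hedged sketch of the expected lifecycle; the actual call sites are not part of this hunk, and the time source is an assumption.

#include "mali_control_timer.h"

static void demo_control_timer_lifecycle(u64 time_now)
{
	/* module load: allocate the OSK timer and attach the callback */
	if (_MALI_OSK_ERR_OK != mali_control_timer_init())
		return;

	/* GPU becomes busy (caller holds the utilization data lock):
	 * resume returns MALI_TRUE only if the timer was not already running */
	if (MALI_TRUE == mali_control_timer_resume(time_now))
		mali_control_timer_add(1000); /* default 1000 ms sampling period */

	/* GPU idles or the system suspends: stop sampling */
	mali_control_timer_suspend(MALI_TRUE);

	/* module unload: delete and free the timer */
	mali_control_timer_term();
}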
index 94cc5a7554fe3a19b94468f6d91e365c00a6d00d..220184eb28d01c5751522bc0925b86638714bc1b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -20,8 +20,8 @@
  */
 #define MALI_DLBU_SIZE 0x400
 
-u32 mali_dlbu_phys_addr = 0;
-static mali_io_address mali_dlbu_cpu_addr = 0;
+mali_dma_addr mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
 
 /**
  * DLBU register numbers
@@ -30,36 +30,36 @@ static mali_io_address mali_dlbu_cpu_addr = 0;
  */
 typedef enum mali_dlbu_register {
        MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
-                                                            31:12 Physical address to the page used for the DLBU
-                                                            0 DLBU enable - set this bit to 1 enables the AXI bus
-                                                            between PPs and L2s, setting to 0 disables the router and
-                                                            no further transactions are sent to DLBU */
+                                                             31:12 Physical address to the page used for the DLBU
+                                                             0 DLBU enable - set this bit to 1 enables the AXI bus
+                                                             between PPs and L2s, setting to 0 disables the router and
+                                                             no further transactions are sent to DLBU */
        MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR     = 0x0004, /**< Master tile list virtual base address;
-                                                            31:12 Virtual address to the page used for the DLBU */
-       MALI_DLBU_REGISTER_TLLIST_VBASEADDR        = 0x0008, /**< Tile list virtual base address;
-                                                            31:12 Virtual address to the tile list. This address is used when
-                                                            calculating the call address sent to PP.*/
-       MALI_DLBU_REGISTER_FB_DIM                  = 0x000C, /**< Framebuffer dimension;
-                                                            23:16 Number of tiles in Y direction-1
-                                                            7:0 Number of tiles in X direction-1 */
-       MALI_DLBU_REGISTER_TLLIST_CONF             = 0x0010, /**< Tile list configuration;
-                                                            29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
-                                                            21:16 2^n number of tiles to be binned to one tile list in Y direction
-                                                            5:0 2^n number of tiles to be binned to one tile list in X direction */
-       MALI_DLBU_REGISTER_START_TILE_POS          = 0x0014, /**< Start tile positions;
-                                                            31:24 start position in Y direction for group 1
-                                                            23:16 start position in X direction for group 1
-                                                            15:8 start position in Y direction for group 0
-                                                            7:0 start position in X direction for group 0 */
-       MALI_DLBU_REGISTER_PP_ENABLE_MASK          = 0x0018, /**< PP enable mask;
-                                                            7 enable PP7 for load balancing
-                                                            6 enable PP6 for load balancing
-                                                            5 enable PP5 for load balancing
-                                                            4 enable PP4 for load balancing
-                                                            3 enable PP3 for load balancing
-                                                            2 enable PP2 for load balancing
-                                                            1 enable PP1 for load balancing
-                                                            0 enable PP0 for load balancing */
+                                                             31:12 Virtual address to the page used for the DLBU */
+       MALI_DLBU_REGISTER_TLLIST_VBASEADDR     = 0x0008, /**< Tile list virtual base address;
+                                                             31:12 Virtual address to the tile list. This address is used when
+                                                             calculating the call address sent to PP.*/
+       MALI_DLBU_REGISTER_FB_DIM                 = 0x000C, /**< Framebuffer dimension;
+                                                             23:16 Number of tiles in Y direction-1
+                                                             7:0 Number of tiles in X direction-1 */
+       MALI_DLBU_REGISTER_TLLIST_CONF       = 0x0010, /**< Tile list configuration;
+                                                             29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+                                                             21:16 2^n number of tiles to be binned to one tile list in Y direction
+                                                             5:0 2^n number of tiles to be binned to one tile list in X direction */
+       MALI_DLBU_REGISTER_START_TILE_POS         = 0x0014, /**< Start tile positions;
+                                                             31:24 start position in Y direction for group 1
+                                                             23:16 start position in X direction for group 1
+                                                             15:8 start position in Y direction for group 0
+                                                             7:0 start position in X direction for group 0 */
+       MALI_DLBU_REGISTER_PP_ENABLE_MASK         = 0x0018, /**< PP enable mask;
+                                                             7 enable PP7 for load balancing
+                                                             6 enable PP6 for load balancing
+                                                             5 enable PP5 for load balancing
+                                                             4 enable PP4 for load balancing
+                                                             3 enable PP3 for load balancing
+                                                             2 enable PP2 for load balancing
+                                                             1 enable PP1 for load balancing
+                                                             0 enable PP0 for load balancing */
 } mali_dlbu_register;
 
 typedef enum {
@@ -76,16 +76,17 @@ typedef enum {
 struct mali_dlbu_core {
        struct mali_hw_core     hw_core;           /**< Common for all HW cores */
        u32                     pp_cores_mask;     /**< This is a mask for the PP cores whose operation will be controlled by LBU
-                                                     see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+                                                      see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
 };
 
 _mali_osk_errcode_t mali_dlbu_initialize(void)
 {
-
        MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
 
-       if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
-               MALI_SUCCESS;
+       if (_MALI_OSK_ERR_OK ==
+           mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+                                   &mali_dlbu_cpu_addr)) {
+               return _MALI_OSK_ERR_OK;
        }
 
        return _MALI_OSK_ERR_FAULT;
@@ -95,10 +96,15 @@ void mali_dlbu_terminate(void)
 {
        MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
 
-       mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+       if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+               mali_mmu_release_table_page(mali_dlbu_phys_addr,
+                                           mali_dlbu_cpu_addr);
+               mali_dlbu_phys_addr = 0;
+               mali_dlbu_cpu_addr = NULL;
+       }
 }
 
-struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource)
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
 {
        struct mali_dlbu_core *core = NULL;
 
@@ -126,8 +132,6 @@ struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource)
 void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
 {
        MALI_DEBUG_ASSERT_POINTER(dlbu);
-
-       mali_dlbu_reset(dlbu);
        mali_hw_core_delete(&dlbu->hw_core);
        _mali_osk_free(dlbu);
 }
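[Editorial note] The guard added to mali_dlbu_terminate() above makes teardown idempotent: if initialization failed before the table page was allocated, both addresses are still 0 and the release is skipped; after a successful release they are zeroed again, so a stray second call is harmless. A sketch, assuming the module-level addresses start out as 0:

    mali_dlbu_terminate();  /* releases the table page, zeroes both addresses */
    mali_dlbu_terminate();  /* second call is now a harmless no-op */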
@@ -168,8 +172,8 @@ void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
        struct mali_pp_core *pp_core;
        u32 bcast_id;
 
-       MALI_DEBUG_ASSERT_POINTER( dlbu );
-       MALI_DEBUG_ASSERT_POINTER( group );
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       MALI_DEBUG_ASSERT_POINTER(group);
 
        pp_core = mali_group_get_pp_core(group);
        bcast_id = mali_pp_core_get_bcast_id(pp_core);
@@ -184,8 +188,8 @@ void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *grou
        struct mali_pp_core *pp_core;
        u32 bcast_id;
 
-       MALI_DEBUG_ASSERT_POINTER( dlbu );
-       MALI_DEBUG_ASSERT_POINTER( group );
+       MALI_DEBUG_ASSERT_POINTER(dlbu);
+       MALI_DEBUG_ASSERT_POINTER(group);
 
        pp_core = mali_group_get_pp_core(group);
        bcast_id = mali_pp_core_get_bcast_id(pp_core);
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dlbu.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dlbu.h
index 597a9088f54f97e87215b6a5865ad3380c0ae7dc..d55f417f1fa26c5cb54c1cc229ee7804fac59a67 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 struct mali_pp_job;
 struct mali_group;
-
-extern u32 mali_dlbu_phys_addr;
-
 struct mali_dlbu_core;
 
+extern mali_dma_addr mali_dlbu_phys_addr;
+
 _mali_osk_errcode_t mali_dlbu_initialize(void);
 void mali_dlbu_terminate(void);
 
-struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
 void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
 
 _mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.c
deleted file mode 100644 (file)
index 2fb8750..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#include "mali_kernel_common.h"
-#include "mali_osk.h"
-#include "mali_hw_core.h"
-#include "mali_dma.h"
-
-/**
- * Size of the Mali-450 DMA unit registers in bytes.
- */
-#define MALI450_DMA_REG_SIZE 0x08
-
-/**
- * Value that appears in MEMSIZE if an error occurs when reading the command list.
- */
-#define MALI450_DMA_BUS_ERR_VAL 0xffffffff
-
-/**
- * Mali DMA registers
- * Used in the register read/write routines.
- * See the hardware documentation for more information about each register.
- */
-typedef enum mali_dma_register {
-
-       MALI450_DMA_REG_SOURCE_ADDRESS = 0x0000,
-       MALI450_DMA_REG_SOURCE_SIZE = 0x0004,
-} mali_dma_register;
-
-struct mali_dma_core {
-       struct mali_hw_core  hw_core;      /**< Common for all HW cores */
-       _mali_osk_spinlock_t *lock;            /**< Lock protecting access to DMA core */
-       mali_dma_pool pool;                /**< Memory pool for command buffers */
-};
-
-static struct mali_dma_core *mali_global_dma_core = NULL;
-
-struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
-{
-       struct mali_dma_core* dma;
-       _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);
-
-       dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
-       if (dma == NULL) goto alloc_failed;
-
-       dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
-       if (NULL == dma->lock) goto lock_init_failed;
-
-       dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
-       if (NULL == dma->pool) goto dma_pool_failed;
-
-       err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
-       if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;
-
-       mali_global_dma_core = dma;
-       MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
-       return dma;
-
-       /* Error handling */
-
-hw_core_failed:
-       mali_dma_pool_destroy(dma->pool);
-dma_pool_failed:
-       _mali_osk_spinlock_term(dma->lock);
-lock_init_failed:
-       _mali_osk_free(dma);
-alloc_failed:
-       MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
-       return NULL;
-}
-
-void mali_dma_delete(struct mali_dma_core *dma)
-{
-       MALI_DEBUG_ASSERT_POINTER(dma);
-
-       MALI_DEBUG_PRINT(2, ("Mali DMA: Deleted Mali APB DMA unit\n"));
-
-       mali_hw_core_delete(&dma->hw_core);
-       _mali_osk_spinlock_term(dma->lock);
-       mali_dma_pool_destroy(dma->pool);
-       _mali_osk_free(dma);
-}
-
-static void mali_dma_bus_error(struct mali_dma_core *dma)
-{
-       u32 addr = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS);
-
-       MALI_PRINT_ERROR(("Mali DMA: Bus error when reading command list from 0x%lx\n", addr));
-
-       /* Clear the bus error */
-       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, 0);
-}
-
-static mali_bool mali_dma_is_busy(struct mali_dma_core *dma)
-{
-       u32 val;
-       mali_bool dma_busy_flag = MALI_FALSE;
-
-       MALI_DEBUG_ASSERT_POINTER(dma);
-
-       val = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE);
-
-       if (MALI450_DMA_BUS_ERR_VAL == val) {
-               /* Bus error reading command list */
-               mali_dma_bus_error(dma);
-               return MALI_FALSE;
-       }
-       if (val > 0) {
-               dma_busy_flag = MALI_TRUE;
-       }
-
-       return dma_busy_flag;
-}
-
-static void mali_dma_start_transfer(struct mali_dma_core* dma, mali_dma_cmd_buf *buf)
-{
-       u32 memsize = buf->size * 4;
-       u32 addr = buf->phys_addr;
-
-       MALI_DEBUG_ASSERT_POINTER(dma);
-       MALI_DEBUG_ASSERT(memsize < (1 << 16));
-       MALI_DEBUG_ASSERT(0 == (memsize & 0x3)); /* 4 byte aligned */
-
-       MALI_DEBUG_ASSERT(!mali_dma_is_busy(dma));
-
-       /* Writes the physical source memory address of chunk containing command headers and data */
-       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS, addr);
-
-       /* Writes the length of transfer */
-       mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, memsize);
-}
-
-_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf)
-{
-       MALI_DEBUG_ASSERT_POINTER(buf);
-
-       buf->virt_addr = (u32*)mali_dma_pool_alloc(mali_global_dma_core->pool, &buf->phys_addr);
-       if (NULL == buf->virt_addr) {
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       /* size contains the number of words in the buffer and is incremented
-        * as commands are added to the buffer. */
-       buf->size = 0;
-
-       return _MALI_OSK_ERR_OK;
-}
-
-void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf)
-{
-       MALI_DEBUG_ASSERT_POINTER(buf);
-
-       if (NULL == buf->virt_addr) return;
-
-       mali_dma_pool_free(mali_global_dma_core->pool, buf->virt_addr, buf->phys_addr);
-
-       buf->virt_addr = NULL;
-}
-
-_mali_osk_errcode_t mali_dma_start(struct mali_dma_core* dma, mali_dma_cmd_buf *buf)
-{
-       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
-
-       _mali_osk_spinlock_lock(dma->lock);
-
-       if (mali_dma_is_busy(dma)) {
-               err = _MALI_OSK_ERR_BUSY;
-               goto out;
-       }
-
-       mali_dma_start_transfer(dma, buf);
-
-out:
-       _mali_osk_spinlock_unlock(dma->lock);
-       return err;
-}
-
-void mali_dma_debug(struct mali_dma_core *dma)
-{
-       MALI_DEBUG_ASSERT_POINTER(dma);
-       MALI_DEBUG_PRINT(1, ("DMA unit registers:\n\t%08x, %08x\n",
-                            mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS),
-                            mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE)
-                           ));
-
-}
-
-struct mali_dma_core *mali_dma_get_global_dma_core(void)
-{
-       /* Returns the global dma core object */
-       return mali_global_dma_core;
-}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dma.h
deleted file mode 100644 (file)
index e62b7b9..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#ifndef __MALI_DMA_H__
-#define __MALI_DMA_H__
-
-#include "mali_osk.h"
-#include "mali_osk_mali.h"
-#include "mali_hw_core.h"
-
-#define MALI_DMA_CMD_BUF_SIZE 1024
-
-typedef struct mali_dma_cmd_buf {
-       u32 *virt_addr;           /**< CPU address of command buffer */
-       u32 phys_addr;            /**< Physical address of command buffer */
-       u32 size;                 /**< Number of prepared words in command buffer */
-} mali_dma_cmd_buf;
-
-/** @brief Create a new DMA unit
- *
- * This is called from entry point of the driver in order to create and
- * intialize the DMA resource
- *
- * @param resource it will be a pointer to a DMA resource
- * @return DMA object on success, NULL on failure
- */
-struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource);
-
-/** @brief Delete DMA unit
- *
- * This is called on entry point of driver if the driver initialization fails
- * after initialization of the DMA unit. It is also called on the exit of the
- * driver to delete the DMA resource
- *
- * @param dma Pointer to DMA unit object
- */
-void mali_dma_delete(struct mali_dma_core *dma);
-
-/** @brief Retrieves the MALI DMA core object (if there is)
- *
- * @return The Mali DMA object otherwise NULL
- */
-struct mali_dma_core *mali_dma_get_global_dma_core(void);
-
-/**
- * @brief Run a command buffer on the DMA unit
- *
- * @param dma Pointer to the DMA unit to use
- * @param buf Pointer to the command buffer to use
- * @return _MALI_OSK_ERR_OK if the buffer was started successfully,
- *         _MALI_OSK_ERR_BUSY if the DMA unit is busy.
- */
-_mali_osk_errcode_t mali_dma_start(struct mali_dma_core* dma, mali_dma_cmd_buf *buf);
-
-/**
- * @brief Create a DMA command
- *
- * @param core Mali core
- * @param reg offset to register of core
- * @param n number of registers to write
- */
-MALI_STATIC_INLINE u32 mali_dma_command_write(struct mali_hw_core *core, u32 reg, u32 n)
-{
-       u32 core_offset = core->phys_offset;
-
-       MALI_DEBUG_ASSERT(reg < 0x2000);
-       MALI_DEBUG_ASSERT(n < 0x800);
-       MALI_DEBUG_ASSERT(core_offset < 0x30000);
-       MALI_DEBUG_ASSERT(0 == ((core_offset + reg) & ~0x7FFFF));
-
-       return (n << 20) | (core_offset + reg);
-}
-
-/**
- * @brief Add a array write to DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param count Number of 4 byte words to write
- */
-MALI_STATIC_INLINE void mali_dma_write_array(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
-        u32 reg, u32 *data, u32 count)
-{
-       MALI_DEBUG_ASSERT((buf->size + 1 + count ) < MALI_DMA_CMD_BUF_SIZE / 4);
-
-       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, count);
-
-       _mali_osk_memcpy(buf->virt_addr + buf->size, data, count * sizeof(*buf->virt_addr));
-
-       buf->size += count;
-}
-
-/**
- * @brief Add a conditional array write to DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param count Number of 4 byte words to write
- * @param ref Pointer to referance data that can be skipped if equal
- */
-MALI_STATIC_INLINE void mali_dma_write_array_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
-        u32 reg, u32 *data, u32 count, const u32 *ref)
-{
-       /* Do conditional array writes are not yet implemented, fallback to a
-        * normal array write. */
-       mali_dma_write_array(buf, core, reg, data, count);
-}
-
-/**
- * @brief Add a conditional register write to the DMA command buffer
- *
- * If the data matches the reference the command will be skipped.
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param ref Pointer to referance data that can be skipped if equal
- */
-MALI_STATIC_INLINE void mali_dma_write_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
-        u32 reg, u32 data, const u32 ref)
-{
-       /* Skip write if reference value is equal to data. */
-       if (data == ref) return;
-
-       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
-
-       buf->virt_addr[buf->size++] = data;
-
-       MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
-}
-
-/**
- * @brief Add a register write to the DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- */
-MALI_STATIC_INLINE void mali_dma_write(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
-                                       u32 reg, u32 data)
-{
-       buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
-
-       buf->virt_addr[buf->size++] = data;
-
-       MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
-}
-
-/**
- * @brief Prepare DMA command buffer for use
- *
- * This function allocates the DMA buffer itself.
- *
- * @param buf The mali_dma_cmd_buf to prepare
- * @return _MALI_OSK_ERR_OK if the \a buf is ready to use
- */
-_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf);
-
-/**
- * @brief Check if a DMA command buffer is ready for use
- *
- * @param buf The mali_dma_cmd_buf to check
- * @return MALI_TRUE if buffer is usable, MALI_FALSE otherwise
- */
-MALI_STATIC_INLINE mali_bool mali_dma_cmd_buf_is_valid(mali_dma_cmd_buf *buf)
-{
-       return NULL != buf->virt_addr;
-}
-
-/**
- * @brief Return a DMA command buffer
- *
- * @param buf Pointer to DMA command buffer to return
- */
-void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf);
-
-#endif /* __MALI_DMA_H__ */
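[Editorial note] On the removal above: the deleted mali_dma_command_write() packed a DMA command word as (n << 20) | (core_offset + reg), i.e. the word count in the high bits and the 19-bit target register address in the low bits. A worked example with illustrative values:

    /* Write 4 words starting at register 0x100 of a core at offset 0x2000:
     * (4 << 20) | (0x2000 + 0x100) = 0x00402100 */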
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.c
new file mode 100644 (file)
index 0000000..451a897
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2010-2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
+
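[Editorial note] A worked example of the macro above; since it is only used with constant arguments, the floating-point math is folded at compile time and never reaches kernel FP code:

    MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90)
            = (int)(90 * 256 / 100.0 + 0.5)
            = (int)230.9
            = 230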
+/** The max fps, the same as the display vsync rate (default 60); can be set via a module parameter */
+int mali_max_system_fps = 60;
+/** A lower limit on the desired FPS (default 58); can be set via a module parameter */
+int mali_desired_fps = 58;
+
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
+static int clock_step = -1;
+static int cur_clk_step = -1;
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Platform callbacks for GPU frequency control */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND  1000000000ULL
+static u32 calculate_window_render_fps(u64 time_period)
+{
+       u32 max_window_number;
+       u64 tmp;
+       u64 max = time_period;
+       u32 leading_zeroes;
+       u32 shift_val;
+       u32 time_period_shift;
+       u32 max_window_number_shift;
+       u32 ret_val;
+
+       max_window_number = mali_session_max_window_num();
+
+       /* To avoid float division, extend the dividend to ns unit */
+       tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+       if (tmp > time_period) {
+               max = tmp;
+       }
+
+       /*
+        * The dividend or the divisor (or both) may be 64-bit values.
+        * To avoid depending on a 64-bit divider, we first shift both
+        * values down equally.
+        */
+       leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+       shift_val = 32 - leading_zeroes;
+
+       time_period_shift = (u32)(time_period >> shift_val);
+       max_window_number_shift = (u32)(tmp >> shift_val);
+
+       ret_val = max_window_number_shift / time_period_shift;
+
+       return ret_val;
+}
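[Editorial note] A quick sanity check of the shift trick above, with illustrative numbers: with 3 windows and time_period = 5e7 ns, tmp = 3e9 still fits in 32 bits, max >> 32 = 0, and on ARM clz(0) = 32, so shift_val = 0 and the result is 3e9 / 5e7 = 60 fps. With 10 windows, tmp = 1e10 and max >> 32 = 2, so shift_val = 2 and both values are shifted down before the 32-bit division: (1e10 >> 2) / (5e7 >> 2) = 2500000000 / 12500000 = 200 fps.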
+
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+       int i = 0;
+       bool clock_changed = false;
+
+       /* Round up to the closest available frequency step for target_clock_mhz */
+       for (i = 0; i < gpu_clk->num_of_steps; i++) {
+               /* Find the first item > target_clock_mhz */
+               if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+                       break;
+               }
+       }
+
+       /* If the target clock is greater than the maximum clock, just pick the maximum one */
+       if (i == gpu_clk->num_of_steps) {
+               i = gpu_clk->num_of_steps - 1;
+       } else {
+               if ((!pick_clock_up) && (i > 0)) {
+                       i = i - 1;
+               }
+       }
+
+       clock_step = i;
+       if (cur_clk_step != clock_step) {
+               clock_changed = true;
+       }
+
+       return clock_changed;
+}
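[Editorial note] For example, with illustrative steps {100, 200, 300} MHz and a target of 250 MHz: the loop stops at the first step above the target (300 MHz, i = 2). With pick_clock_up true, clock_step selects 300 MHz; with it false, the index steps back one to 200 MHz. A target of 400 MHz runs off the end of the table and clamps to the top step.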
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+       int under_perform_boundary_value = 0;
+       int over_perform_boundary_value = 0;
+       int current_fps = 0;
+       int current_gpu_util = 0;
+       bool clock_changed = false;
+       u32 window_render_fps;
+#if CLOCK_TUNING_TIME_DEBUG
+       struct timeval start;
+       struct timeval stop;
+       unsigned int elapsed_time;
+       do_gettimeofday(&start);
+#endif
+
+       if (NULL == gpu_clk) {
+               MALI_DEBUG_PRINT(2, ("DVFS enabled, but the platform doesn't support freq change.\n"));
+               return;
+       }
+
+       window_render_fps = calculate_window_render_fps(time_period);
+
+       current_fps = window_render_fps;
+       current_gpu_util = data->utilization_gpu;
+
+       /* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+       if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+       } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+       } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+       } else {
+               under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+               over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+       }
+
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d,  over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+       MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d,  pressure render fps = %d \n", current_fps, window_render_fps));
+
+       /* Get current clock value */
+       cur_clk_step = mali_gpu_get_freq();
+
+       /* Consider offscreen */
+       if (0 == current_fps) {
+               /* GP or PP is under-performing; give full power */
+               if (current_gpu_util > over_perform_boundary_value) {
+                       if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+                               clock_changed = true;
+                               clock_step = gpu_clk->num_of_steps - 1;
+                       }
+               }
+
+               /* If GPU is idle, use lowest power */
+               if (0 == current_gpu_util) {
+                       if (cur_clk_step != 0) {
+                               clock_changed = true;
+                               clock_step = 0;
+                       }
+               }
+
+               goto real_setting;
+       }
+
+       /* Calculate target clock if the GPU clock can be tuned */
+       if (-1 != cur_clk_step) {
+               int target_clk_mhz = -1;
+               mali_bool pick_clock_up = MALI_TRUE;
+
+               if (current_gpu_util > under_perform_boundary_value) {
+                       /* When under-performing, the fps part must be considered */
+                       target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+                       pick_clock_up = MALI_TRUE;
+               } else if (current_gpu_util < over_perform_boundary_value) {
+                       /* When over-performing, fps need not be considered; the system does not need to reach the desired fps */
+                       target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+                       pick_clock_up = MALI_FALSE;
+               }
+
+               if (-1 != target_clk_mhz) {
+                       clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+               }
+       }
+
+real_setting:
+       if (clock_changed) {
+               mali_gpu_set_freq(clock_step);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                             gpu_clk->item[clock_step].clock,
+                                             gpu_clk->item[clock_step].vol / 1000,
+                                             0, 0, 0);
+       }
+
+#if CLOCK_TUNING_TIME_DEBUG
+       do_gettimeofday(&stop);
+
+       elapsed_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+       MALI_DEBUG_PRINT(2, ("Using ARM power policy: elapsed time = %d\n", elapsed_time));
+#endif
+}
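[Editorial note] Tracing the under-perform branch above with illustrative numbers: at 300 MHz, utilization 240/256, current fps 30 and desired fps 58, the band boundary is MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55) = 141, so target_clk_mhz = 300 * 240 * 58 / 141 / 30 = 987 MHz, which mali_pickup_closest_avail_clock() then clamps to the platform's highest available step.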
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+       _mali_osk_device_data data;
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+                       MALI_DEBUG_PRINT(2, ("Mali DVFS init: using ARM DVFS policy\n"));
+
+                       mali_fps_step1 = mali_max_system_fps / 3;
+                       mali_fps_step2 = mali_max_system_fps / 5;
+
+                       data.get_clock_info(&gpu_clk);
+
+                       if (gpu_clk != NULL) {
+#ifdef DEBUG
+                               int i;
+                               for (i = 0; i < gpu_clk->num_of_steps; i++) {
+                                       MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+                                                            i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+                               }
+#endif
+                       } else {
+                               MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for the DDK to do DVFS.\n"));
+                       }
+
+                       mali_gpu_get_freq = data.get_freq;
+                       mali_gpu_set_freq = data.set_freq;
+
+                       if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+                           && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+                               mali_dvfs_enabled = MALI_TRUE;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callbacks incomplete; check mali_gpu_device_data in the platform code.\n"));
+               }
+       } else {
+               err = _MALI_OSK_ERR_FAULT;
+               MALI_DEBUG_PRINT(2, ("Mali DVFS init: failed to get platform data.\n"));
+       }
+
+       return err;
+}
+
+/*
+ * When Mali DVFS is enabled, always give full power at the start of a
+ * new period, for performance reasons.
+ */
+void mali_dvfs_policy_new_period(void)
+{
+       /* Always give full power at the start of a new period */
+       unsigned int cur_clk_step = 0;
+
+       cur_clk_step = mali_gpu_get_freq();
+
+       if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+               mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+                                             gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+       }
+}
+
+mali_bool mali_dvfs_policy_enabled(void)
+{
+       return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+       if (mali_platform_device != NULL) {
+
+               struct mali_gpu_device_data *device_data = NULL;
+               device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+               if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+                       int cur_clk_step = device_data->get_freq();
+                       struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+                       device_data->get_clock_info(&mali_gpu_clk);
+                       clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+                       clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+               } else {
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callbacks incomplete; check mali_gpu_device_data in the platform code.\n"));
+               }
+       }
+}
+#endif
+
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_dvfs_policy.h
new file mode 100644 (file)
index 0000000..2dec9ad
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2010-2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_DVFS_POLICY_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.c
new file mode 100644 (file)
index 0000000..4c95529
--- /dev/null
@@ -0,0 +1,2598 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job
+ * queueing when in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+       EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+       EXEC_STATE_DISABLED,    /* Disabled by core scaling (do not use) */
+       EXEC_STATE_EMPTY,       /* No child groups for virtual group (do not use) */
+       EXEC_STATE_INACTIVE,    /* Can be used, but must be activate first */
+       EXEC_STATE_IDLE,        /* Active and ready to be used */
+       EXEC_STATE_WORKING,     /* Executing a job */
+};
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but must be activated first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* PP cores that haven't been enabled yet because some PP cores haven't finished disabling. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to notify user space about PP core changes when core
+ * scaling finishes in the mali_executor_complete_group() function. */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
+static void mali_executor_complete_group(struct mali_group *group,
+               mali_bool success,
+               struct mali_gp_job **gp_job_done,
+               struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *old_list,
+               u32 *old_count,
+               _mali_osk_list_t *new_list,
+               u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+               enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *new_list,
+               u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+       mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+       if (NULL == mali_executor_lock_obj) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+       if (NULL == executor_wq_high_pri) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_working_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == executor_working_wait_queue) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+       if (NULL == executor_wq_notify_core_change) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+       if (NULL == executor_notify_core_change_wait_queue) {
+               mali_executor_terminate();
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_terminate(void)
+{
+       if (NULL != executor_notify_core_change_wait_queue) {
+               _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+               executor_notify_core_change_wait_queue = NULL;
+       }
+
+       if (NULL != executor_wq_notify_core_change) {
+               _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+               executor_wq_notify_core_change = NULL;
+       }
+
+       if (NULL != executor_working_wait_queue) {
+               _mali_osk_wait_queue_term(executor_working_wait_queue);
+               executor_working_wait_queue = NULL;
+       }
+
+       if (NULL != executor_wq_high_pri) {
+               _mali_osk_wq_delete_work(executor_wq_high_pri);
+               executor_wq_high_pri = NULL;
+       }
+
+       if (NULL != mali_executor_lock_obj) {
+               _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+               mali_executor_lock_obj = NULL;
+       }
+}
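[Editorial note] A design note on the pair above: mali_executor_terminate() checks every handle for NULL before freeing it and then clears it, so mali_executor_initialize() can simply call it on any failure path instead of unwinding each resource by hand. A minimal sketch of the same idiom, with hypothetical names:

    static struct foo *a;   /* hypothetical resources */
    static struct bar *b;

    static void term(void)
    {
            if (b) { free_bar(b); b = NULL; }   /* each step is NULL-safe */
            if (a) { free_foo(a); a = NULL; }
    }

    static int init(void)
    {
            a = alloc_foo();
            if (!a) { term(); return -ENOMEM; }
            b = alloc_bar();
            if (!b) { term(); return -ENOMEM; }
            return 0;
    }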
+
+void mali_executor_populate(void)
+{
+       u32 num_groups;
+       u32 i;
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       /* Do we have a virtual group? */
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (mali_group_is_virtual(group)) {
+                       virtual_group = group;
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+                       break;
+               }
+       }
+
+       /* Find all the available physical GP and PP cores */
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+                       if (!mali_group_is_virtual(group)) {
+                               if (NULL != pp_core) {
+                                       if (0 == pp_version) {
+                                               /* Retrieve PP version from the first available PP core */
+                                               pp_version = mali_pp_core_get_version(pp_core);
+                                       }
+
+                                       if (NULL != virtual_group) {
+                                               mali_executor_lock();
+                                               mali_group_add_group(virtual_group, group);
+                                               mali_executor_unlock();
+                                       } else {
+                                               _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+                                               group_list_inactive_count++;
+                                       }
+
+                                       num_physical_pp_cores_total++;
+                               } else {
+                                       MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+                                       if (0 == gp_version) {
+                                               /* Retrieve GP version */
+                                               gp_version = mali_gp_core_get_version(gp_core);
+                                       }
+
+                                       gp_group = group;
+                                       gp_group_state = EXEC_STATE_INACTIVE;
+                               }
+
+                       }
+               }
+       }
+
+       num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
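[Editorial note] In effect, populate builds the topology as follows: on a Mali-450 (virtual group present) every physical PP group is added as a child of the virtual group, while on a Mali-300/400 the physical PP groups land on the inactive list; the single GP group and the virtual group are tracked through their state variables rather than through the lists.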
+
+void mali_executor_depopulate(void)
+{
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+       if (NULL != gp_group) {
+               mali_group_delete(gp_group);
+               gp_group = NULL;
+       }
+
+       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+       if (NULL != virtual_group) {
+               mali_group_delete(virtual_group);
+               virtual_group = NULL;
+       }
+
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               mali_group_delete(group);
+       }
+}
+
+void mali_executor_suspend(void)
+{
+       mali_executor_lock();
+
+       /* Increment the pause_count so that no more jobs will be scheduled */
+       pause_count++;
+
+       mali_executor_unlock();
+
+       _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+                                       mali_executor_is_suspended, NULL);
+
+       /*
+        * mali_executor_complete_XX() leaves jobs in the idle state.
+        * The deactivate option is used when we are going to power down
+        * the entire GPU (OS suspend) and want a consistent SW vs. HW
+        * state.
+        */
+       mali_executor_lock();
+
+       mali_executor_deactivate_list_idle(MALI_TRUE);
+
+       /*
+        * The following steps deactivate all activated
+        * (MALI_GROUP_STATE_ACTIVE) and activating
+        * (MALI_GROUP_STATE_ACTIVATION_PENDING) groups, to make sure the
+        * variable pd_mask_wanted equals 0.
+        */
+       if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+               gp_group_state = EXEC_STATE_INACTIVE;
+               mali_group_deactivate(gp_group);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               if (MALI_GROUP_STATE_INACTIVE
+                   != mali_group_get_state(virtual_group)) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+                       mali_group_deactivate(virtual_group);
+               }
+       }
+
+       if (0 < group_list_inactive_count) {
+               struct mali_group *group;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                           &group_list_inactive,
+                                           struct mali_group, executor_list) {
+                       if (MALI_GROUP_STATE_ACTIVATION_PENDING
+                           == mali_group_get_state(group)) {
+                               mali_group_deactivate(group);
+                       }
+
+                       /*
+                        * On the Mali-450 platform, a physical group may sit in the
+                        * inactive list in MALI_GROUP_STATE_ACTIVATION_PENDING state.
+                        * Deactivating it alone is not enough; it must also be added
+                        * back to the virtual group. The virtual group is now in the
+                        * INACTIVE state, so it is safe to add the physical group to
+                        * it at this point.
+                        */
+                       if (NULL != virtual_group) {
+                               _mali_osk_list_delinit(&group->executor_list);
+                               group_list_inactive_count--;
+
+                               mali_group_add_group(virtual_group, group);
+                       }
+               }
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+       mali_executor_lock();
+
+       /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+       pause_count--;
+       if (0 == pause_count) {
+               mali_executor_schedule();
+       }
+
+       mali_executor_unlock();
+}
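[Editorial note] The pause_count above makes suspend/resume nestable; scheduling only restarts when the calls balance out. A usage sketch:

    mali_executor_suspend();   /* pause_count 0 -> 1; waits until nothing is running */
    mali_executor_suspend();   /* pause_count 1 -> 2; returns once the GPU is idle */
    mali_executor_resume();    /* pause_count 2 -> 1; still paused */
    mali_executor_resume();    /* pause_count 1 -> 0; mali_executor_schedule() runs */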
+
+u32 mali_executor_get_num_cores_total(void)
+{
+       return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+       return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+       MALI_DEBUG_ASSERT_POINTER(virtual_group);
+       MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+       return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+       return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+       struct mali_group *group;
+       struct mali_group *temp;
+       mali_bool ret;
+
+       mali_executor_lock();
+
+       /*
+        * This function is a bit complicated because
+        * mali_group_zap_session() can fail. This only happens when the
+        * group has an unhandled page fault pending.
+        * We need to make sure this page fault is handled before we return,
+        * so that we know every outstanding MMU transaction has completed.
+        * This allows the caller to safely remove physical pages once we
+        * have returned.
+        */
+
+       MALI_DEBUG_ASSERT(NULL != gp_group);
+       ret = mali_group_zap_session(gp_group, session);
+       if (MALI_FALSE == ret) {
+               struct mali_gp_job *gp_job = NULL;
+
+               mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+               MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+               /* GP job completed, make sure it is freed */
+               mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               ret = mali_group_zap_session(virtual_group, session);
+               if (MALI_FALSE == ret) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_FALSE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+                                   struct mali_group, executor_list) {
+               ret = mali_group_zap_session(group, session);
+               if (MALI_FALSE == ret) {
+                       ret = mali_group_zap_session(group, session);
+                       if (MALI_FALSE == ret) {
+                               struct mali_pp_job *pp_job = NULL;
+
+                               mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+                               if (NULL != pp_job) {
+                                       /* PP job completed, free it */
+                                       mali_scheduler_complete_pp_job(pp_job,
+                                                                      0, MALI_FALSE,
+                                                                      MALI_TRUE);
+                               }
+                       }
+               }
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+       if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+               if (MALI_TRUE == deferred_schedule) {
+                       _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+               } else {
+                       /* Schedule from this thread*/
+                       mali_executor_lock();
+                       mali_executor_schedule();
+                       mali_executor_unlock();
+               }
+       }
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+       mali_bool time_out = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+       if (mali_group_has_timed_out(group)) {
+               int_result = MALI_INTERRUPT_RESULT_ERROR;
+               time_out = MALI_TRUE;
+               MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+                           mali_gp_job_get_id(group->gp_running_job),
+                           mali_group_core_description(group)));
+       } else {
+               int_result = mali_group_get_interrupt_result_gp(group);
+               if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+       mali_group_mask_all_interrupts_gp(group);
+
+       if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+               if (mali_group_gp_is_active(group)) {
+                       /* Only VS completed so far, while PLBU is still active */
+
+                       /* Enable all but the current interrupt */
+                       mali_group_enable_interrupts_gp(group, int_result);
+
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_OK;
+               }
+       } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+               if (mali_group_gp_is_active(group)) {
+                       /* Only PLBU completed so far, while VS is still active */
+
+                       /* Enable all but the current interrupt */
+                       mali_group_enable_interrupts_gp(group, int_result);
+
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_OK;
+               }
+       } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+               struct mali_gp_job *job = mali_group_get_running_gp_job(group);
+
+               /* PLBU out of mem */
+               MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+               /* Give group a chance to generate a SUSPEND event */
+               mali_group_oom(group);
+#endif
+
+               /*
+                * No need to keep the interrupt raised while
+                * waiting for more memory.
+                */
+               mali_executor_send_gp_oom_to_user(job);
+
+               mali_executor_unlock();
+
+               return _MALI_OSK_ERR_OK;
+       }
+
+       /* We should now have a real interrupt to handle */
+
+       MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+                            mali_group_core_description(group),
+                            (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+                            "ERROR" : "success"));
+
+       if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+               /* Don't bother to do processing of errors in upper half */
+               mali_executor_unlock();
+
+               if (MALI_FALSE == time_out) {
+                       mali_group_schedule_bottom_half_gp(group);
+               }
+       } else {
+               struct mali_gp_job *job;
+               mali_bool success;
+
+               if (MALI_TRUE == time_out) {
+                       mali_group_dump_status(group);
+               }
+
+               success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+                         MALI_TRUE : MALI_FALSE;
+
+               mali_executor_complete_group(group, success, &job, NULL);
+
+               mali_executor_unlock();
+
+               /* GP jobs always fully complete */
+               MALI_DEBUG_ASSERT(NULL != job);
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_gp_job(job, success,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+       mali_bool time_out = MALI_FALSE;
+
+       MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       if (in_upper_half) {
+               if (mali_group_is_in_virtual(group)) {
+                       /* Child groups should never handle PP interrupts */
+                       MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+       MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+       if (mali_group_has_timed_out(group)) {
+               int_result = MALI_INTERRUPT_RESULT_ERROR;
+               time_out = MALI_TRUE;
+               MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+                           mali_pp_job_get_id(group->pp_running_job),
+                           mali_group_core_description(group)));
+       } else {
+               int_result = mali_group_get_interrupt_result_pp(group);
+               if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+               if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+                       /* Some child groups are still working, so nothing to do right now */
+                       mali_executor_unlock();
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+       /* We should now have a real interrupt to handle */
+
+       MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+                            mali_group_core_description(group),
+                            (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+                            "ERROR" : "success"));
+
+       if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+               /* Don't bother to do processing of errors in upper half */
+               mali_group_mask_all_interrupts_pp(group);
+               mali_executor_unlock();
+
+               if (MALI_FALSE == time_out) {
+                       mali_group_schedule_bottom_half_pp(group);
+               }
+       } else {
+               struct mali_pp_job *job = NULL;
+               mali_bool success;
+
+               if (MALI_TRUE == time_out) {
+                       mali_group_dump_status(group);
+               }
+
+               success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+                         MALI_TRUE : MALI_FALSE;
+
+               mali_executor_complete_group(group, success, NULL, &job);
+
+               mali_executor_unlock();
+
+               if (NULL != job) {
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(job,
+                                                      num_physical_pp_cores_total,
+                                                      MALI_TRUE, MALI_TRUE);
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+               mali_bool in_upper_half)
+{
+       enum mali_interrupt_result int_result;
+
+       MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+                            mali_group_core_description(group),
+                            in_upper_half ? "upper" : "bottom"));
+
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+       int_result = mali_group_get_interrupt_result_mmu(group);
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+               /* No interrupts signalled, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
+       }
+#else
+       MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+       /* We should now have a real interrupt to handle */
+
+       if (in_upper_half) {
+               /* Don't bother to do processing of errors in upper half */
+
+               struct mali_group *parent = group->parent_group;
+
+               mali_mmu_mask_all_interrupts(group->mmu);
+
+               mali_executor_unlock();
+
+               if (NULL == parent) {
+                       mali_group_schedule_bottom_half_mmu(group);
+               } else {
+                       mali_group_schedule_bottom_half_mmu(parent);
+               }
+
+       } else {
+               struct mali_gp_job *gp_job = NULL;
+               struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+               u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+               u32 status = mali_mmu_get_status(group->mmu);
+               MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
+                                    (void *)(uintptr_t)fault_address,
+                                    (status >> 6) & 0x1F,
+                                    (status & 32) ? "write" : "read",
+                                    group->mmu->hw_core.description));
+               MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+                                    mali_mmu_get_rawstat(group->mmu), status));
+#endif
+
+               mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job);
+
+               mali_executor_unlock();
+
+               if (NULL != gp_job) {
+                       MALI_DEBUG_ASSERT(NULL == pp_job);
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                                      MALI_TRUE, MALI_TRUE);
+               } else if (NULL != pp_job) {
+                       MALI_DEBUG_ASSERT(NULL == gp_job);
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(pp_job,
+                                                      num_physical_pp_cores_total,
+                                                      MALI_TRUE, MALI_TRUE);
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
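+/*
+ * Called after a set of groups has been powered on (typically from the PM
+ * module). Groups with a pending activation are moved from the inactive to
+ * the idle list, the virtual group is activated once its children allow it,
+ * and a schedule pass is triggered so queued jobs can start on the newly
+ * available cores.
+ */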
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+       u32 i;
+       mali_bool child_groups_activated = MALI_FALSE;
+       mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+       u32 num_activated = 0;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(groups);
+       MALI_DEBUG_ASSERT(0 < num_groups);
+
+       mali_executor_lock();
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+       for (i = 0; i < num_groups; i++) {
+               MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+               mali_group_power_up(groups[i]);
+
+               if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+                    (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
+                       /* nothing more to do for this group */
+                       continue;
+               }
+
+               MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+               num_activated++;
+#endif
+
+               if (mali_group_is_in_virtual(groups[i])) {
+                       /*
+                        * At least one child group of the virtual group
+                        * is now powered on.
+                        */
+                       child_groups_activated = MALI_TRUE;
+               } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+                       /* Mark GP and physical PP groups outside the virtual group as active. */
+                       mali_group_set_active(groups[i]);
+               }
+
+               /* Move group from inactive to idle list */
+               if (groups[i] == gp_group) {
+                       MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+                                         gp_group_state);
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+                          && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+                       MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+                                         EXEC_STATE_INACTIVE));
+
+                       mali_executor_change_state_pp_physical(groups[i],
+                                                              &group_list_inactive,
+                                                              &group_list_inactive_count,
+                                                              &group_list_idle,
+                                                              &group_list_idle_count);
+               }
+
+               do_schedule = MALI_TRUE;
+       }
+
+       if (mali_executor_has_virtual_group() &&
+           MALI_TRUE == child_groups_activated &&
+           MALI_GROUP_STATE_ACTIVATION_PENDING ==
+           mali_group_get_state(virtual_group)) {
+               /*
+                * Try to activate the virtual group. This may not succeed
+                * every time: not all child groups are necessarily powered
+                * on at once while the virtual group is still in the
+                * activation-pending state.
+                */
+               if (mali_group_set_active(virtual_group)) {
+                       /* Move group from inactive to idle */
+                       MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+                                         virtual_group_state);
+                       virtual_group_state = EXEC_STATE_IDLE;
+
+                       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u  physical activated, 1 virtual activated.\n", num_groups, num_activated));
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+               }
+       } else {
+               MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+       }
+
+       if (MALI_TRUE == do_schedule) {
+               /* Trigger a schedule */
+               mali_executor_schedule();
+       }
+
+       mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+                                   u32 num_groups)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(groups);
+       MALI_DEBUG_ASSERT(0 < num_groups);
+
+       mali_executor_lock();
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+       for (i = 0; i < num_groups; i++) {
+               /* Groups must be either disabled or inactive. The virtual group
+                * may additionally be in the empty state: on pm_runtime_suspend
+                * the virtual group can be powered off, but mali_pm_state_lock
+                * must be released before mali_executor_lock is taken, so a newly
+                * queued physical job may have pulled every physical group out of
+                * the virtual group in between. Hence only an empty virtual group
+                * can be powered down here; its physical groups are powered up
+                * again in the subsequent pm_runtime_resume callback.
+                */
+               MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+                                 EXEC_STATE_DISABLED) ||
+                                 mali_executor_group_is_in_state(groups[i],
+                                                 EXEC_STATE_INACTIVE) ||
+                                 mali_executor_group_is_in_state(groups[i],
+                                                 EXEC_STATE_EMPTY));
+
+               MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+                                    mali_group_core_description(groups[i])));
+
+               mali_group_power_down(groups[i]);
+       }
+
+       MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+       mali_executor_unlock();
+}
+
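+/*
+ * Tear down executor state for an aborting session: any GP/PP job from the
+ * session that is currently running is force-completed and freed, and the
+ * session pointer is cleared from all idle, inactive and disabled groups so
+ * that no stale reference remains.
+ */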
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+       struct mali_group *group;
+       struct mali_group *tmp_group;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3,
+                        ("Executor: Aborting all jobs from session 0x%08X.\n",
+                         session));
+
+       mali_executor_lock();
+
+       if (mali_group_get_session(gp_group) == session) {
+               if (EXEC_STATE_WORKING == gp_group_state) {
+                       struct mali_gp_job *gp_job = NULL;
+
+                       mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+                       MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+                       /* GP job completed, make sure it is freed */
+                       mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+                                                      MALI_FALSE, MALI_TRUE);
+               } else {
+                       /* Same session, but not working, so just clear it */
+                       mali_group_clear_session(gp_group);
+               }
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               if (EXEC_STATE_WORKING == virtual_group_state
+                   && mali_group_get_session(virtual_group) == session) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_FALSE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+                                   struct mali_group, executor_list) {
+               if (mali_group_get_session(group) == session) {
+                       struct mali_pp_job *pp_job = NULL;
+
+                       mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+                       if (NULL != pp_job) {
+                               /* PP job completed, make sure it is freed */
+                               mali_scheduler_complete_pp_job(pp_job, 0,
+                                                              MALI_FALSE, MALI_TRUE);
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+               mali_group_clear_session(group);
+       }
+
+       mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+       /* Core scaling is enabled by default */
+       core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+       core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+       return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+
+       if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+           && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+               mali_executor_group_enable_internal(group);
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, disable it immediately.
+ * If the group is a child of the virtual group and the virtual group is
+ * idle, remove the given physical group from it and disable it.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+
+       if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+           && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+               mali_executor_group_disable_internal(group);
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+       /* NB: This function is not optimized for time-critical usage */
+
+       mali_bool ret;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       mali_executor_lock();
+       ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+       mali_executor_unlock();
+
+       return ret;
+}
+
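+/*
+ * Core scaling entry point: scale the number of enabled physical PP cores
+ * towards target_core_nr. Returns -EPERM when core scaling is disabled and
+ * override is not set, and -EINVAL for zero cores or for more cores than
+ * are physically present. User space is notified about the change via a
+ * deferred work item.
+ */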
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+       if (target_core_nr == num_physical_pp_cores_enabled)
+               return 0;
+       if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override)
+               return -EPERM;
+       if (target_core_nr > num_physical_pp_cores_total)
+               return -EINVAL;
+       if (0 == target_core_nr)
+               return -EINVAL;
+
+       mali_executor_core_scale(target_core_nr);
+
+       _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+       return 0;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       mali_executor_lock();
+
+       switch (gp_group_state) {
+       case EXEC_STATE_INACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state INACTIVE\n");
+               break;
+       case EXEC_STATE_IDLE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state IDLE\n");
+               break;
+       case EXEC_STATE_WORKING:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in state WORKING\n");
+               break;
+       default:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP group is in unknown/illegal state %u\n",
+                                       gp_group_state);
+               break;
+       }
+
+       n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in WORKING state (count = %u):\n",
+                               group_list_working_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in IDLE state (count = %u):\n",
+                               group_list_idle_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in INACTIVE state (count = %u):\n",
+                               group_list_inactive_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "Physical PP groups in DISABLED state (count = %u):\n",
+                               group_list_disabled_count);
+
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               n += mali_group_dump_state(group, buf + n, size - n);
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               switch (virtual_group_state) {
+               case EXEC_STATE_EMPTY:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state EMPTY\n");
+                       break;
+               case EXEC_STATE_INACTIVE:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state INACTIVE\n");
+                       break;
+               case EXEC_STATE_IDLE:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state IDLE\n");
+                       break;
+               case EXEC_STATE_WORKING:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in state WORKING\n");
+                       break;
+               default:
+                       n += _mali_osk_snprintf(buf + n, size - n,
+                                               "Virtual PP group is in unknown/illegal state %u\n",
+                                               virtual_group_state);
+                       break;
+               }
+
+               n += mali_group_dump_state(virtual_group, buf + n, size - n);
+       }
+
+       mali_executor_unlock();
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->number_of_total_cores = num_physical_pp_cores_total;
+       args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->version = pp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->number_of_cores = 1;
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+       args->version = gp_version;
+       return _MALI_OSK_ERR_OK;
+}
+
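+/*
+ * User-space response to a _MALI_NOTIFICATION_GP_STALLED (out-of-memory)
+ * event: if a new heap is supplied, the still-running GP job identified by
+ * args->cookie is resumed with it; otherwise (no heap, notification
+ * allocation failure, or the job no longer running) the job is aborted.
+ */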
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+               _mali_osk_notification_t *new_notification = NULL;
+
+               new_notification = _mali_osk_notification_create(
+                                          _MALI_NOTIFICATION_GP_STALLED,
+                                          sizeof(_mali_uk_gp_job_suspended_s));
+
+               if (NULL != new_notification) {
+                       MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+                                            args->cookie, args->arguments[0], args->arguments[1]));
+
+                       mali_executor_lock();
+
+                       /* Resume the job in question if it is still running */
+                       job = mali_group_get_running_gp_job(gp_group);
+                       if (NULL != job &&
+                           args->cookie == mali_gp_job_get_id(job) &&
+                           session == mali_gp_job_get_session(job)) {
+                               /*
+                                * Correct job is running, resume with new heap
+                                */
+
+                               mali_gp_job_set_oom_notification(job,
+                                                                new_notification);
+
+                               /* This will also re-enable interrupts */
+                               mali_group_resume_gp_with_new_heap(gp_group,
+                                                                  args->cookie,
+                                                                  args->arguments[0],
+                                                                  args->arguments[1]);
+
+                               mali_executor_unlock();
+                               return _MALI_OSK_ERR_OK;
+                       } else {
+                               MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n"));
+
+                               _mali_osk_notification_delete(new_notification);
+
+                               mali_executor_unlock();
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+               }
+       } else {
+               MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+       }
+
+       mali_executor_lock();
+
+       /* Abort the job in question if it is still running */
+       job = mali_group_get_running_gp_job(gp_group);
+       if (NULL != job &&
+           args->cookie == mali_gp_job_get_id(job) &&
+           session == mali_gp_job_get_session(job)) {
+               /* Correct job is still running */
+               struct mali_gp_job *job_done = NULL;
+
+               mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL);
+
+               /* The same job should have completed */
+               MALI_DEBUG_ASSERT(job_done == job);
+
+               /* GP job completed, make sure it is freed */
+               mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+                                              MALI_TRUE, MALI_TRUE);
+       }
+
+       mali_executor_unlock();
+       return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+void mali_executor_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+       MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+void mali_executor_unlock(void)
+{
+       MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+       _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
+static mali_bool mali_executor_is_suspended(void *data)
+{
+       mali_bool ret;
+
+       /* This callback does not use the data pointer. */
+       MALI_IGNORE(data);
+
+       mali_executor_lock();
+
+       ret = pause_count > 0 && !mali_executor_is_working();
+
+       mali_executor_unlock();
+
+       return ret;
+}
+
+static mali_bool mali_executor_is_working(void)
+{
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       return (0 != group_list_working_count ||
+               EXEC_STATE_WORKING == gp_group_state ||
+               EXEC_STATE_WORKING == virtual_group_state);
+}
+
+static void mali_executor_disable_empty_virtual(void)
+{
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+       MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+       if (mali_group_is_empty(virtual_group)) {
+               virtual_group_state = EXEC_STATE_EMPTY;
+       }
+}
+
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+       mali_bool trigger_pm_update = MALI_FALSE;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       /* Only rejoining after job has completed (still active) */
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+                         mali_group_get_state(group));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+       MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+       /* Make sure group and virtual group have same status */
+
+       if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+               if (mali_group_deactivate(group)) {
+                       trigger_pm_update = MALI_TRUE;
+               }
+
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+               }
+       } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+                  mali_group_get_state(virtual_group)) {
+               /*
+                * Activation is pending for virtual group, leave
+                * this child group as active.
+                */
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+                                 mali_group_get_state(virtual_group));
+
+               if (virtual_group_state == EXEC_STATE_EMPTY) {
+                       virtual_group_state = EXEC_STATE_IDLE;
+               }
+       }
+
+       /* Remove group from idle list */
+       MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+                         EXEC_STATE_IDLE));
+       _mali_osk_list_delinit(&group->executor_list);
+       group_list_idle_count--;
+
+       /*
+        * And finally rejoin the virtual group
+        * group will start working on same job as virtual_group,
+        * if virtual_group is working on a job
+        */
+       mali_group_add_group(virtual_group, group);
+
+       return trigger_pm_update;
+}
+
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return ((EXEC_STATE_INACTIVE == virtual_group_state ||
+                EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
+              MALI_TRUE : MALI_FALSE;
+#else
+       return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+       struct mali_pp_job *job;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       job = mali_scheduler_job_pp_physical_peek();
+
+       if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+               if (0 < group_list_working_count &&
+                   mali_pp_job_is_large_and_unstarted(job)) {
+                       return MALI_TRUE;
+               }
+       }
+
+       return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */
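+/*
+ * In outline: (1) activate the GP group if GP work is queued, (2) prepare as
+ * many physical PP groups as needed (activating inactive ones and, if
+ * required, stealing children from the virtual group) and assign physical
+ * jobs to them, (3) deactivate idle PP groups, (4)-(7) activate the virtual
+ * and GP groups and assign jobs to them, or deactivate them when no work
+ * remains, and (9) finally start all selected jobs after dropping the
+ * scheduler lock. The numbered comments below mark the individual steps.
+ */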
+static void mali_executor_schedule(void)
+{
+       u32 i;
+       u32 num_physical_needed = 0;
+       u32 num_physical_to_process = 0;
+       mali_bool trigger_pm_update = MALI_FALSE;
+       mali_bool deactivate_idle_group = MALI_TRUE;
+
+       /* Physical groups + jobs to start in this function */
+       struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+       u32 num_jobs_to_start = 0;
+
+       /* Virtual job to start in this function */
+       struct mali_pp_job *virtual_job_to_start = NULL;
+
+       /* GP job to start in this function */
+       struct mali_gp_job *gp_job_to_start = NULL;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (pause_count > 0) {
+               /* Execution is suspended, don't schedule any jobs. */
+               return;
+       }
+
+       /* Lock needed in order to safely handle the job queues */
+       mali_scheduler_lock();
+
+       /* 1. Activate the GP group first if a GP job is queued. */
+       if (EXEC_STATE_INACTIVE == gp_group_state &&
+           0 < mali_scheduler_job_gp_count()) {
+
+               enum mali_group_state state =
+                       mali_group_activate(gp_group);
+               if (MALI_GROUP_STATE_ACTIVE == state) {
+                       /* Set GP group state to idle */
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else {
+                       trigger_pm_update = MALI_TRUE;
+               }
+       }
+
+       /* 2. Prepare as many physical groups as needed/possible */
+
+       num_physical_needed = mali_scheduler_job_physical_head_count();
+
+       /* On the Mali-450 platform this block is rarely entered. */
+       if (0 < num_physical_needed) {
+
+               if (num_physical_needed <= group_list_idle_count) {
+                       /* We have enough groups on idle list already */
+                       num_physical_to_process = num_physical_needed;
+                       num_physical_needed = 0;
+               } else {
+                       /* We need to get a hold of some more groups */
+                       num_physical_to_process = group_list_idle_count;
+                       num_physical_needed -= group_list_idle_count;
+               }
+
+               if (0 < num_physical_needed) {
+
+                       /* 2.1. Activate groups which are inactive */
+
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+                                                   struct mali_group, executor_list) {
+                               enum mali_group_state state =
+                                       mali_group_activate(group);
+                               if (MALI_GROUP_STATE_ACTIVE == state) {
+                                       /* Move from inactive to idle */
+                                       mali_executor_change_state_pp_physical(group,
+                                                                              &group_list_inactive,
+                                                                              &group_list_inactive_count,
+                                                                              &group_list_idle,
+                                                                              &group_list_idle_count);
+                                       num_physical_to_process++;
+                               } else {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+
+                               num_physical_needed--;
+                               if (0 == num_physical_needed) {
+                                       /* We have activated all the groups we need */
+                                       break;
+                               }
+                       }
+               }
+
+               if (mali_executor_virtual_group_is_usable()) {
+
+                       /*
+                        * 2.2. And finally, steal and activate groups
+                        * from virtual group if we need even more
+                        */
+                       while (0 < num_physical_needed) {
+                               struct mali_group *group;
+
+                               group = mali_group_acquire_group(virtual_group);
+                               if (NULL != group) {
+                                       enum mali_group_state state;
+
+                                       mali_executor_disable_empty_virtual();
+
+                                       state = mali_group_activate(group);
+                                       if (MALI_GROUP_STATE_ACTIVE == state) {
+                                               /* Group is ready, add to idle list */
+                                               _mali_osk_list_add(
+                                                       &group->executor_list,
+                                                       &group_list_idle);
+                                               group_list_idle_count++;
+                                               num_physical_to_process++;
+                                       } else {
+                                               /*
+                                                * Group is not ready yet,
+                                                * add to inactive list
+                                                */
+                                               _mali_osk_list_add(
+                                                       &group->executor_list,
+                                                       &group_list_inactive);
+                                               group_list_inactive_count++;
+
+                                               trigger_pm_update = MALI_TRUE;
+                                       }
+                                       num_physical_needed--;
+                               } else {
+                                       /*
+                                        * We could not get enough groups
+                                        * from the virtual group.
+                                        */
+                                       break;
+                               }
+                       }
+               }
+
+               /* 2.3. Assign physical jobs to groups */
+
+               if (0 < num_physical_to_process) {
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               struct mali_pp_job *job = NULL;
+                               u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+                               MALI_DEBUG_ASSERT(num_jobs_to_start <
+                                                 MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+                               MALI_DEBUG_ASSERT(0 <
+                                                 mali_scheduler_job_physical_head_count());
+
+                               if (mali_executor_hint_is_enabled(
+                                           MALI_EXECUTOR_HINT_GP_BOUND)) {
+                                       if (MALI_TRUE == mali_executor_tackle_gp_bound()) {
+                                               /*
+                                                * We're GP bound,
+                                                * don't start this job
+                                                * right now.
+                                                */
+                                               deactivate_idle_group = MALI_FALSE;
+                                               num_physical_to_process = 0;
+                                               break;
+                                       }
+                               }
+
+                               job = mali_scheduler_job_pp_physical_get(
+                                             &sub_job);
+
+                               MALI_DEBUG_ASSERT_POINTER(job);
+                               MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+                               /* Put job + group on list of jobs to start later on */
+
+                               groups_to_start[num_jobs_to_start] = group;
+                               jobs_to_start[num_jobs_to_start] = job;
+                               sub_jobs_to_start[num_jobs_to_start] = sub_job;
+                               num_jobs_to_start++;
+
+                               /* Move group from idle to working */
+                               mali_executor_change_state_pp_physical(group,
+                                                                      &group_list_idle,
+                                                                      &group_list_idle_count,
+                                                                      &group_list_working,
+                                                                      &group_list_working_count);
+
+                               num_physical_to_process--;
+                               if (0 == num_physical_to_process) {
+                                       /* Got all we needed */
+                                       break;
+                               }
+                       }
+               }
+       }
+
+
+       /* 3. Deactivate idle PP groups. This must be done before the virtual
+        *    group is activated, to cover the case where the normal queue
+        *    holds only physical jobs while the groups are still inactive:
+        *    the job start is deferred until the groups are activated and the
+        *    scheduler runs again. If a virtual job arrives in the high
+        *    priority queue in the meantime, that schedule pass would
+        *    otherwise do nothing, since executor scheduling had stopped.
+        */
+
+       if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+                       && (!mali_timeline_has_physical_pp_job()))) {
+               trigger_pm_update = MALI_TRUE;
+       }
+
+       /* 4. Activate virtual group, if needed */
+
+       if (EXEC_STATE_INACTIVE == virtual_group_state &&
+           0 < mali_scheduler_job_next_is_virtual()) {
+               enum mali_group_state state =
+                       mali_group_activate(virtual_group);
+               if (MALI_GROUP_STATE_ACTIVE == state) {
+                       /* Set virtual group state to idle */
+                       virtual_group_state = EXEC_STATE_IDLE;
+               } else {
+                       trigger_pm_update = MALI_TRUE;
+               }
+       }
+
+       /* 5. To power up group asap, we trigger pm update here. */
+
+       if (MALI_TRUE == trigger_pm_update) {
+               trigger_pm_update = MALI_FALSE;
+               mali_pm_update_async();
+       }
+
+       /* 6. Assign jobs to idle virtual group (or deactivate if no job) */
+
+       if (EXEC_STATE_IDLE == virtual_group_state) {
+               if (0 < mali_scheduler_job_next_is_virtual()) {
+                       virtual_job_to_start =
+                               mali_scheduler_job_pp_virtual_get();
+                       virtual_group_state = EXEC_STATE_WORKING;
+               } else if (!mali_timeline_has_virtual_pp_job()) {
+                       virtual_group_state = EXEC_STATE_INACTIVE;
+
+                       if (mali_group_deactivate(virtual_group)) {
+                               trigger_pm_update = MALI_TRUE;
+                       }
+               }
+       }
+
+       /* 7. Assign job to idle GP group (or deactivate if no job) */
+
+       if (EXEC_STATE_IDLE == gp_group_state) {
+               if (0 < mali_scheduler_job_gp_count()) {
+                       gp_job_to_start = mali_scheduler_job_gp_get();
+                       gp_group_state = EXEC_STATE_WORKING;
+               } else if (!mali_timeline_has_gp_job()) {
+                       gp_group_state = EXEC_STATE_INACTIVE;
+                       if (mali_group_deactivate(gp_group)) {
+                               trigger_pm_update = MALI_TRUE;
+                       }
+               }
+       }
+
+       /* 8. We no longer need the schedule/queue lock */
+
+       mali_scheduler_unlock();
+
+       /* 9. start jobs */
+
+       if (NULL != virtual_job_to_start) {
+               MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+               mali_group_start_pp_job(virtual_group,
+                                       virtual_job_to_start, 0);
+       }
+
+       for (i = 0; i < num_jobs_to_start; i++) {
+               MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+                                         groups_to_start[i]));
+               mali_group_start_pp_job(groups_to_start[i],
+                                       jobs_to_start[i],
+                                       sub_jobs_to_start[i]);
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+       if (NULL != gp_job_to_start) {
+               MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+               mali_group_start_gp_job(gp_group, gp_job_to_start);
+       }
+
+       /* 10. Trigger any pending PM updates */
+       if (MALI_TRUE == trigger_pm_update) {
+               mali_pm_update_async();
+       }
+}
+
+/* Handler for deferred schedule requests */
+static void mali_executor_wq_schedule(void *arg)
+{
+       MALI_IGNORE(arg);
+       mali_executor_lock();
+       mali_executor_schedule();
+       mali_executor_unlock();
+}
+
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
+{
+       _mali_uk_gp_job_suspended_s *jobres;
+       _mali_osk_notification_t *notification;
+
+       notification = mali_gp_job_get_oom_notification(job);
+
+       /*
+        * Remember the id we send to user space, so we have something to
+        * verify when we get a response
+        */
+       gp_returned_cookie = mali_gp_job_get_id(job);
+
+       jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       jobres->cookie = gp_returned_cookie;
+
+       mali_session_send_notification(mali_gp_job_get_session(job),
+                                      notification);
+}
+
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+               mali_bool success)
+{
+       struct mali_gp_job *job;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* Extract the needed HW status from the core, then reset it */
+       job = mali_group_complete_gp(group, success);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Core is now ready to go into idle list */
+       gp_group_state = EXEC_STATE_IDLE;
+
+       /* This will potentially queue more GP and PP jobs */
+       mali_timeline_tracker_release(&job->tracker);
+
+       /* Signal PP job */
+       mali_gp_job_signal_pp_tracker(job, success);
+
+       return job;
+}
+
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+               mali_bool success)
+{
+       struct mali_pp_job *job;
+       u32 sub_job;
+       mali_bool job_is_done;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       /* Extract the needed HW status from the core, then reset it */
+       job = mali_group_complete_pp(group, success, &sub_job);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Core is now ready to go into idle list */
+       if (mali_group_is_virtual(group)) {
+               virtual_group_state = EXEC_STATE_IDLE;
+       } else {
+               /* Move from working to idle state */
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_working,
+                                                      &group_list_working_count,
+                                                      &group_list_idle,
+                                                      &group_list_idle_count);
+       }
+
+       /* It is the executor module which owns the jobs themselves by now */
+       mali_pp_job_mark_sub_job_completed(job, success);
+       job_is_done = mali_pp_job_is_complete(job);
+
+       if (job_is_done) {
+               /* This will potentially queue more GP and PP jobs */
+               mali_timeline_tracker_release(&job->tracker);
+       }
+
+       return job;
+}
+
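+/*
+ * Common completion path for a group. At most one of gp_job_done/pp_job_done
+ * is written: a GP job is always returned, while a PP job is only returned
+ * once all of its sub-jobs have completed. A schedule pass (or core-scale
+ * completion handling) runs before returning so that the group can pick up
+ * new work immediately.
+ */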
+static void mali_executor_complete_group(struct mali_group *group,
+               mali_bool success,
+               struct mali_gp_job **gp_job_done,
+               struct mali_pp_job **pp_job_done)
+{
+       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+       struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+       struct mali_gp_job *gp_job = NULL;
+       struct mali_pp_job *pp_job = NULL;
+       mali_bool pp_job_is_done = MALI_TRUE;
+
+       if (NULL != gp_core) {
+               gp_job = mali_executor_complete_gp(group, success);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(pp_core);
+               MALI_IGNORE(pp_core);
+               pp_job = mali_executor_complete_pp(group, success);
+
+               pp_job_is_done = mali_pp_job_is_complete(pp_job);
+       }
+
+       if (pause_count > 0) {
+               /* Execution has been suspended */
+
+               if (!mali_executor_is_working()) {
+                       /* Last job completed, wake up sleepers */
+                       _mali_osk_wait_queue_wake_up(
+                               executor_working_wait_queue);
+               }
+       } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+               mali_executor_core_scale_in_group_complete(group);
+
+               mali_executor_schedule();
+       } else {
+               /* try to schedule new jobs */
+               mali_executor_schedule();
+       }
+
+       if (NULL != gp_job) {
+               MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+               *gp_job_done = gp_job;
+       } else if (pp_job_is_done) {
+               MALI_DEBUG_ASSERT_POINTER(pp_job);
+               MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+               *pp_job_done = pp_job;
+       }
+}
+
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *old_list,
+               u32 *old_count,
+               _mali_osk_list_t *new_list,
+               u32 *new_count)
+{
+       /*
+        * It's a bit more complicated to change the state for the physical PP
+        * groups since their state is determined by the list they are on.
+        */
+#if defined(DEBUG)
+       mali_bool found = MALI_FALSE;
+       struct mali_group *group_iter;
+       struct mali_group *temp;
+       u32 old_counted = 0;
+       u32 new_counted = 0;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(old_list);
+       MALI_DEBUG_ASSERT_POINTER(old_count);
+       MALI_DEBUG_ASSERT_POINTER(new_list);
+       MALI_DEBUG_ASSERT_POINTER(new_count);
+
+       /*
+        * Verify that group is present on old list,
+        * and that the count is correct
+        */
+
+       _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+                                   struct mali_group, executor_list) {
+               old_counted++;
+               if (group == group_iter) {
+                       found = MALI_TRUE;
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+                                   struct mali_group, executor_list) {
+               new_counted++;
+       }
+
+       if (MALI_FALSE == found) {
+               if (old_list == &group_list_idle) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+               } else if (old_list == &group_list_inactive) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+               } else if (old_list == &group_list_working) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+               } else if (old_list == &group_list_disabled) {
+                       MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
+               }
+
+               if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+                       MALI_DEBUG_PRINT(1, (" group in working \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+                       MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+                       MALI_DEBUG_PRINT(1, (" group in idle \n"));
+               } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+                       MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+               }
+       }
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == found);
+       MALI_DEBUG_ASSERT(0 < (*old_count));
+       MALI_DEBUG_ASSERT((*old_count) == old_counted);
+       MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+       _mali_osk_list_move(&group->executor_list, new_list);
+       (*old_count)--;
+       (*new_count)++;
+}
+
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+               _mali_osk_list_t *new_list,
+               u32 *new_count)
+{
+       _mali_osk_list_add(&group->executor_list, new_list);
+       (*new_count)++;
+}
+
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+               enum mali_executor_state_t state)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (gp_group == group) {
+               if (gp_group_state == state) {
+                       return MALI_TRUE;
+               }
+       } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+               if (virtual_group_state == state) {
+                       return MALI_TRUE;
+               }
+       } else {
+               /* Physical PP group */
+               struct mali_group *group_iter;
+               struct mali_group *temp;
+               _mali_osk_list_t *list;
+
+               if (EXEC_STATE_DISABLED == state) {
+                       list = &group_list_disabled;
+               } else if (EXEC_STATE_INACTIVE == state) {
+                       list = &group_list_inactive;
+               } else if (EXEC_STATE_IDLE == state) {
+                       list = &group_list_idle;
+               } else {
+                       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+                       list = &group_list_working;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+                                           struct mali_group, executor_list) {
+                       if (group_iter == group) {
+                               return MALI_TRUE;
+                       }
+               }
+       }
+
+       /* group not in correct state */
+       return MALI_FALSE;
+}
+
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+       /* Put into inactive state (== "lowest" enabled state) */
+       if (group == gp_group) {
+               MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+               gp_group_state = EXEC_STATE_INACTIVE;
+       } else {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count,
+                                                      &group_list_inactive,
+                                                      &group_list_inactive_count);
+
+               ++num_physical_pp_cores_enabled;
+               MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+       }
+
+       if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+               /* Move from inactive to idle */
+               if (group == gp_group) {
+                       gp_group_state = EXEC_STATE_IDLE;
+               } else {
+                       mali_executor_change_state_pp_physical(group,
+                                                              &group_list_inactive,
+                                                              &group_list_inactive_count,
+                                                              &group_list_idle,
+                                                              &group_list_idle_count);
+
+                       if (mali_executor_has_virtual_group()) {
+                               if (mali_executor_physical_rejoin_virtual(group)) {
+                                       mali_pm_update_async();
+                               }
+                       }
+               }
+       } else {
+               mali_pm_update_async();
+       }
+}
+
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+       mali_bool working;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+       working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+       if (MALI_TRUE == working) {
+               /* Group will be disabled once it completes its current work;
+                * when the virtual group completes, its child groups are also
+                * checked for this flag.
+                */
+               mali_group_set_disable_request(group, MALI_TRUE);
+               return;
+       }
+
+       /* Put into disabled state */
+       if (group == gp_group) {
+               /* GP group */
+               MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+               gp_group_state = EXEC_STATE_DISABLED;
+       } else {
+               if (mali_group_is_in_virtual(group)) {
+                       /* A child group of the virtual group: remove it from the virtual group. */
+                       MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+                       mali_executor_set_state_pp_physical(group,
+                                                           &group_list_disabled,
+                                                           &group_list_disabled_count);
+
+                       mali_group_remove_group(virtual_group, group);
+                       mali_executor_disable_empty_virtual();
+               } else {
+                       mali_executor_change_group_status_disabled(group);
+               }
+
+               --num_physical_pp_cores_enabled;
+               MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+       }
+
+       if (MALI_GROUP_STATE_INACTIVE != group->state) {
+               if (MALI_TRUE == mali_group_deactivate(group)) {
+                       mali_pm_update_async();
+               }
+       }
+}
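+
+/*
+ * Summary of the group state transitions driven by the two functions above,
+ * as far as they are visible here: enabling moves a group DISABLED ->
+ * INACTIVE and, on successful activation, INACTIVE -> IDLE; disabling moves
+ * an IDLE or INACTIVE group straight to DISABLED, while a WORKING group is
+ * only flagged via mali_group_set_disable_request() and disabled later, once
+ * its current job completes (see mali_executor_core_scale_in_group_complete).
+ */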
+
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+       mali_bool done = MALI_FALSE;
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               return;
+       }
+
+       /*
+        * This function gets a bit complicated because we can't hold the session lock while
+        * allocating notification objects.
+        */
+       while (!done) {
+               u32 i;
+               u32 num_sessions_alloc;
+               u32 num_sessions_with_lock;
+               u32 used_notification_objects = 0;
+               _mali_osk_notification_t **notobjs;
+
+               /* Pre-allocate the number of notification objects we need right now (the count might change once the lock has been taken) */
+               num_sessions_alloc = mali_session_get_count();
+               if (0 == num_sessions_alloc) {
+                       /* No sessions to report to */
+                       return;
+               }
+
+               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+               if (NULL == notobjs) {
+                       MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+                       /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+                       return;
+               }
+
+               for (i = 0; i < num_sessions_alloc; i++) {
+                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+                       if (NULL != notobjs[i]) {
+                               _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+                               data->number_of_enabled_cores = num_cores;
+                       } else {
+                               MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+                       }
+               }
+
+               mali_session_lock();
+
+               /* number of sessions will not change while we hold the lock */
+               num_sessions_with_lock = mali_session_get_count();
+
+               if (num_sessions_alloc >= num_sessions_with_lock) {
+                       /* We have allocated enough notification objects for all current sessions */
+                       struct mali_session_data *session, *tmp;
+                       MALI_SESSION_FOREACH(session, tmp, link) {
+                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+                               if (NULL != notobjs[used_notification_objects]) {
+                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
+                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+                               }
+                               used_notification_objects++;
+                       }
+                       done = MALI_TRUE;
+               }
+
+               mali_session_unlock();
+
+               /* Delete any remaining/unused notification objects */
+               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+                       if (NULL != notobjs[used_notification_objects]) {
+                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
+                       }
+               }
+
+               _mali_osk_free(notobjs);
+       }
+}
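+
+/*
+ * Illustrative sketch of the allocate-outside-lock retry pattern used by
+ * mali_executor_notify_core_change() above, reduced to its skeleton. All
+ * names below are placeholders for this sketch, not driver API:
+ *
+ *     for (;;) {
+ *             n = count_items();          // unlocked snapshot
+ *             bufs = alloc_array(n);      // may sleep, so no lock held
+ *             lock();
+ *             if (count_items() <= n) {   // snapshot still sufficient?
+ *                     deliver_all(bufs, n);
+ *                     unlock();
+ *                     break;
+ *             }
+ *             unlock();                   // new items appeared; retry
+ *             free_array(bufs);
+ *     }
+ */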
+
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+       u32 i;
+       u32 num_groups;
+       mali_bool ret = MALI_TRUE;
+
+       MALI_IGNORE(data);
+
+       mali_executor_lock();
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+                               ret = MALI_FALSE;
+                               break;
+                       }
+               }
+       }
+       mali_executor_unlock();
+
+       return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+       MALI_IGNORE(arg);
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               return;
+       }
+
+       _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+                                       mali_executor_core_scaling_is_done, NULL);
+
+       mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+       u32 i;
+       u32 num_groups;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       num_groups = mali_group_get_glob_num_groups();
+
+       for (i = 0; i < num_groups; i++) {
+               struct mali_group *group = mali_group_get_glob_group(i);
+
+               if (NULL != group) {
+                       group->disable_requested = MALI_FALSE;
+               }
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               core_scaling_delay_up_mask[i] = 0;
+       }
+}
+
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+       int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       int i;
+
+       MALI_DEBUG_ASSERT(0 < target_core_nr);
+       MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+       mali_executor_lock();
+
+       if (target_core_nr < num_physical_pp_cores_enabled) {
+               MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+       } else {
+               MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+       }
+
+       /* When a new core scaling request comes in, we should first discard the
+        * not-yet-applied part of the previous request. This is safe because
+        * everything here is protected by a single lock (the executor lock). */
+       mali_executor_core_scaling_reset();
+
+       mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+       mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+               MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 > target_core_scaling_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+                                           && (!mali_group_is_virtual(group))) {
+                                               mali_executor_group_disable_internal(group);
+                                               target_core_scaling_mask[i]++;
+                                               if ((0 == target_core_scaling_mask[i])) {
+                                                       break;
+                                               }
+
+                                       }
+                               }
+                       }
+               }
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               /*
+                * If target_core_scaling_mask[i] is greater than 0, we need to
+                * enable some PP cores in the domain with index i.
+                */
+               if (0 < target_core_scaling_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       if (num_physical_pp_cores_enabled >= target_core_nr) {
+                               break;
+                       }
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+                                           && (!mali_group_is_virtual(group))) {
+                                               mali_executor_group_enable_internal(group);
+                                               target_core_scaling_mask[i]--;
+
+                                               if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /*
+        * At this point some PP cores may still not have been enabled, because
+        * cores that are due to be disabled are still working (and thus still
+        * counted as enabled). Record the remainder so the enables can be
+        * performed once those jobs complete.
+        */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 < target_core_scaling_mask[i]) {
+                       core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+               }
+       }
+
+       mali_executor_schedule();
+       mali_executor_unlock();
+}
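+
+/*
+ * Illustrative walk-through of the mask arithmetic in
+ * mali_executor_core_scale() above (the domain layout is hypothetical): with
+ * three domains and a request to go from 3 enabled cores to 2,
+ *
+ *     current mask = { 2, 1, 0 }      // best cost split for 3 cores
+ *     target mask  = { 1, 1, 0 }      // best cost split for 2 cores
+ *     difference   = {-1, 0, 0 }      // target - current
+ *
+ * so exactly one non-virtual PP group in domain 0 is disabled and no domain
+ * needs cores enabled.
+ */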
+
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+       int num_pp_cores_disabled = 0;
+       int num_pp_cores_to_enable = 0;
+       int i;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+       /* Disable child group of virtual group */
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                       if (MALI_TRUE == mali_group_disable_requested(child)) {
+                               mali_group_set_disable_request(child, MALI_FALSE);
+                               mali_executor_group_disable_internal(child);
+                               num_pp_cores_disabled++;
+                       }
+               }
+               mali_group_set_disable_request(group, MALI_FALSE);
+       } else {
+               mali_executor_group_disable_internal(group);
+               mali_group_set_disable_request(group, MALI_FALSE);
+               if (NULL != mali_group_get_pp_core(group)) {
+                       num_pp_cores_disabled++;
+               }
+       }
+
+       num_pp_cores_to_enable = num_pp_cores_disabled;
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0 < core_scaling_delay_up_mask[i]) {
+                       struct mali_pm_domain *domain;
+
+                       if (0 == num_pp_cores_to_enable) {
+                               break;
+                       }
+
+                       domain = mali_pm_domain_get_from_index(i);
+
+                       /* Domain is valid and has pp cores */
+                       if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+                               struct mali_group *disabled_group;
+                               struct mali_group *temp;
+
+                               _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+                                       if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+                                               mali_executor_group_enable_internal(disabled_group);
+                                               core_scaling_delay_up_mask[i]--;
+                                               num_pp_cores_to_enable--;
+
+                                               if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
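+
+/*
+ * Continuing the scaling example: if a core due for disabling is still
+ * working, mali_executor_core_scale() leaves it flagged and records any
+ * enables it could not yet perform in core_scaling_delay_up_mask. When the
+ * working group's job completes, the function above performs the deferred
+ * disable and then enables previously blocked cores, at most as many as were
+ * just disabled, draining the delay-up mask.
+ */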
+
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+       /* Physical PP group */
+       mali_bool idle;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+       if (MALI_TRUE == idle) {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_idle,
+                                                      &group_list_idle_count,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count);
+       } else {
+               mali_executor_change_state_pp_physical(group,
+                                                      &group_list_inactive,
+                                                      &group_list_inactive_count,
+                                                      &group_list_disabled,
+                                                      &group_list_disabled_count);
+       }
+}
+
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+       mali_bool trigger_pm_update = MALI_FALSE;
+
+       if (group_list_idle_count > 0) {
+               if (mali_executor_has_virtual_group()) {
+
+                       /* Rejoin virtual group on Mali-450 */
+
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                                   &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               if (mali_executor_physical_rejoin_virtual(
+                                           group)) {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+                       }
+               } else if (deactivate_idle_group) {
+                       struct mali_group *group;
+                       struct mali_group *temp;
+
+                       /* Deactivate group on Mali-300/400 */
+
+                       _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+                                                   &group_list_idle,
+                                                   struct mali_group, executor_list) {
+                               if (mali_group_deactivate(group)) {
+                                       trigger_pm_update = MALI_TRUE;
+                               }
+
+                               /* Move from idle to inactive */
+                               mali_executor_change_state_pp_physical(group,
+                                                                      &group_list_idle,
+                                                                      &group_list_idle_count,
+                                                                      &group_list_inactive,
+                                                                      &group_list_inactive_count);
+                       }
+               }
+       }
+
+       return trigger_pm_update;
+}
+
+void mali_executor_running_status_print(void)
+{
+       struct mali_group *group = NULL;
+       struct mali_group *temp = NULL;
+
+       MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job));
+       if ((gp_group->gp_core) && (gp_group->is_working)) {
+               mali_group_dump_status(gp_group);
+       }
+       MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+               MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job));
+               mali_group_dump_status(group);
+       }
+       MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+       MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+       MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count));
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+               MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+               MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+       }
+
+       if (mali_executor_has_virtual_group()) {
+               MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job));
+               MALI_PRINT(("Virtual group status: %d\n", virtual_group_state));
+               MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state));
+               MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off"));
+               _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list,
+                                           struct mali_group, group_list) {
+                       int i = 0;
+                       MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job));
+                       MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state));
+                       MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off"));
+                       if (group->pm_domain) {
+                               MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain)));
+                               MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain)));
+                               MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain)));
+                               MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off"));
+                               MALI_PRINT(("\tWanted  power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off"));
+                       }
+
+                       for (i = 0; i < 2; i++) {
+                               if (NULL != group->l2_cache_core[i]) {
+                                       struct mali_pm_domain *domain;
+                                       domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]);
+                                       MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off"));
+                                       if (domain) {
+                                               MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain)));
+                                               MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain)));
+                                               MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain)));
+                                               MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
+                                               MALI_PRINT(("\tL2 Wanted  power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
+                                       }
+                               }
+                       }
+               }
+               if (EXEC_STATE_WORKING == virtual_group_state) {
+                       mali_group_dump_status(virtual_group);
+               }
+       }
+}
+
+void mali_executor_status_dump(void)
+{
+       mali_executor_lock();
+       mali_scheduler_lock();
+
+       /* print schedule queue status */
+       mali_scheduler_gp_pp_job_queue_print();
+
+       mali_scheduler_unlock();
+       mali_executor_unlock();
+}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_executor.h
new file mode 100644 (file)
index 0000000..d21c6b5
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+       MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX        1
+} mali_executor_hint;
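+/* MALI_EXECUTOR_HINT_MAX is a preprocessor #define rather than an enumerator;
+ * unlike an enum value it is visible to the preprocessor, and it sizes
+ * mali_executor_hints[] below. Keep it equal to the number of hints above. */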
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+       MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+       return mali_executor_hints[hint];
+}
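+
+/*
+ * Illustrative use of the hint interface (the caller and its condition are
+ * hypothetical, not part of this driver):
+ *
+ *     if (gp_queue_much_longer_than_pp_queue)
+ *             mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+ *     else
+ *             mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+ */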
+
+void mali_executor_running_status_print(void);
+void mali_executor_status_dump(void);
+void mali_executor_lock(void);
+void mali_executor_unlock(void);
+#endif /* __MALI_EXECUTOR_H__ */
index 0572fded0ee3991b360f854516bac9ec43016ef1..acc1d8e5bc155329cbbf356dc9fe8cf52769791f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -25,9 +25,9 @@ static struct mali_gp_core *mali_global_gp_core = NULL;
 static void mali_gp_irq_probe_trigger(void *data);
 static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
 
-struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
 {
-       struct mali_gp_core * core = NULL;
+       struct mali_gp_core *core = NULL;
 
        MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
        MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
@@ -44,12 +44,12 @@ struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struc
                                if (_MALI_OSK_ERR_OK == ret) {
                                        /* Setup IRQ handlers (which will do IRQ probing if needed) */
                                        core->irq = _mali_osk_irq_init(resource->irq,
-                                                                      mali_group_upper_half_gp,
-                                                                      group,
-                                                                      mali_gp_irq_probe_trigger,
-                                                                      mali_gp_irq_probe_ack,
-                                                                      core,
-                                                                      resource->description);
+                                                                      mali_group_upper_half_gp,
+                                                                      group,
+                                                                      mali_gp_irq_probe_trigger,
+                                                                      mali_gp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
                                        if (NULL != core->irq) {
                                                MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
                                                mali_global_gp_core = core;
@@ -101,13 +101,13 @@ _mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
        mali_gp_stop_bus(core);
 
        /* Wait for bus to be stopped */
-       for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+       for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
                if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
                        break;
                }
        }
 
-       if (MALI_REG_POLL_COUNT_FAST == i) {
+       if (MALI_REG_POLL_COUNT_SLOW == i) {
                MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
                return _MALI_OSK_ERR_FAULT;
        }
@@ -116,7 +116,7 @@ _mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
 
 void mali_gp_hard_reset(struct mali_gp_core *core)
 {
-       const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+       const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
        const u32 reset_invalid_value = 0xC0FFE000;
        const u32 reset_check_value = 0xC01A0000;
        const u32 reset_default_value = 0;
@@ -175,7 +175,7 @@ _mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
 
        if (i == MALI_REG_POLL_COUNT_FAST) {
                MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
-                                 core->hw_core.description, rawstat));
+                                 core->hw_core.description, rawstat));
                return _MALI_OSK_ERR_FAULT;
        }
 
@@ -229,8 +229,26 @@ void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
        /* Barrier to make sure the previous register write is finished */
        _mali_osk_write_mem_barrier();
 
-       /* This is the command that starts the core. */
+       /* This is the command that starts the core.
+        *
+        * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set; just
+        * force the core to assert the completion interrupt.
+        */
+#if !defined(PROFILING_SKIP_GP_JOBS)
        mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+       {
+               u32 bits = 0;
+
+               if (mali_gp_job_has_vs_job(job))
+                       bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+               if (mali_gp_job_has_plbu_job(job))
+                       bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+               mali_hw_core_register_write_relaxed(&core->hw_core,
+                                                   MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+       }
+#endif
 
        /* Barrier to make sure the previous register write is finished */
        _mali_osk_write_mem_barrier();
@@ -278,7 +296,7 @@ static void mali_gp_irq_probe_trigger(void *data)
        struct mali_gp_core *core = (struct mali_gp_core *)data;
 
        mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
-       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
        _mali_osk_mem_barrier();
 }
 
@@ -288,8 +306,8 @@ static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
        u32 irq_readout;
 
        irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
-       if (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
-               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+       if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) {
+               mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
                _mali_osk_mem_barrier();
                return _MALI_OSK_ERR_OK;
        }
@@ -309,7 +327,7 @@ u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
 }
 #endif
 
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
 {
        u32 val0 = 0;
        u32 val1 = 0;
@@ -322,6 +340,7 @@ void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_
 
 #if defined(CONFIG_MALI400_PROFILING)
                _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+               _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0);
 #endif
 
        }
@@ -332,6 +351,7 @@ void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_
 
 #if defined(CONFIG_MALI400_PROFILING)
                _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+               _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1);
 #endif
        }
 }
index b9a2762df64a710c571a021bba226308357e3e05..c7939c4e3725ecba61a620aa3320e2fa68a463d0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -30,7 +30,7 @@ struct mali_gp_core {
 _mali_osk_errcode_t mali_gp_initialize(void);
 void mali_gp_terminate(void);
 
-struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
 void mali_gp_delete(struct mali_gp_core *core);
 
 void mali_gp_stop_bus(struct mali_gp_core *core);
@@ -47,42 +47,76 @@ u32 mali_gp_core_get_version(struct mali_gp_core *core);
 
 struct mali_gp_core *mali_gp_get_global_gp_core(void);
 
+#if MALI_STATE_TRACKING
 u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
 
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
 
-/*** Accessor functions ***/
-MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
 {
        return core->hw_core.description;
 }
 
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+       u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+                       MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+       if (0 == stat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+                   MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS;
+       } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+       } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+       } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+               return MALI_INTERRUPT_RESULT_OOM;
+       }
+
+       return MALI_INTERRUPT_RESULT_ERROR;
 }
 
-MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
 {
-       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core,
+                                         MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
 }
 
-MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+       u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+       return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
 }
 
-MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
 }
 
-MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
 {
-       /* Enable all interrupts, except those specified in irq_exceptions */
-       mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
-                                   MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+       /* Enable all interrupts, except those specified in exceptions */
+       u32 value;
+
+       if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+               /* Enable all used except VS complete */
+               value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+                       ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+       } else {
+               MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+                                 exceptions);
+               /* Enable all used except PLBU complete */
+               value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+                       ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+       }
+
+       mali_hw_core_register_write(&core->hw_core,
+                                   MALIGP2_REG_ADDR_MGMT_INT_MASK,
+                                   value);
 }
 
 MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
index 1e74d302326d834887a870a4273135420f83e75b..c2631dd97bd0fbb96a429d5a2342658abd47f35a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -14,7 +14,7 @@
 #include "mali_uk_types.h"
 
 static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
-static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER;          /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER;           /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
 
 struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
 {
@@ -82,6 +82,7 @@ void mali_gp_job_delete(struct mali_gp_job *job)
 {
        MALI_DEBUG_ASSERT_POINTER(job);
        MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
 
        /* de-allocate the pre-allocated oom notifications */
        if (NULL != job->oom_notification) {
@@ -96,6 +97,31 @@ void mali_gp_job_delete(struct mali_gp_job *job)
        _mali_osk_free(job);
 }
 
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+       struct mali_gp_job *iter;
+       struct mali_gp_job *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       /* Find position in list/queue where job should be added. */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+                                           struct mali_gp_job, list) {
+
+               /* A span is used to handle job ID wrapping. */
+               bool job_is_after = (mali_gp_job_get_id(job) -
+                                    mali_gp_job_get_id(iter)) <
+                                   MALI_SCHEDULER_JOB_ID_SPAN;
+
+               if (job_is_after) {
+                       break;
+               }
+       }
+
+       _mali_osk_list_add(&job->list, &iter->list);
+}
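+
+/*
+ * Worked example of the span check above: with u32 wrap-around, queueing a
+ * new job with id 5 behind an old job with id 0xFFFFFFFE gives
+ * 5 - 0xFFFFFFFE == 7 (mod 2^32), which is below MALI_SCHEDULER_JOB_ID_SPAN,
+ * so the new job is still ordered after the old one even though 5 is
+ * numerically smaller.
+ */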
+
 u32 mali_gp_job_get_gp_counter_src0(void)
 {
        return gp_counter_src0;
index b30bcfd589d6229f5b89d362466d4f3d805bb3e3..a8443175ceb445d0e71ba0efe35ded1b4cf0cad3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_session.h"
 #include "mali_timeline.h"
 #include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
 
 /**
- * The structure represents a GP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems through-out
+ * its lifetime. Different part of the GP job struct is used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
  */
 struct mali_gp_job {
-       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
-       struct mali_session_data *session;                 /**< Session which submitted this job */
+       /*
+        * These members are typically only set at creation,
+        * and only read later on.
+        * They do not require any lock protection.
+        */
        _mali_uk_gp_start_job_s uargs;                     /**< Arguments from user space */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
+       u32 pid;                                           /**< Process ID of submitting process */
+       u32 tid;                                           /**< Thread ID of submitting thread */
        u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
        u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
+       struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+       struct mali_timeline_tracker *pp_tracker;          /**< Pointer to Timeline tracker for PP job that depends on this job. */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
+
+       /*
+        * These members are used by the scheduler,
+        * protected by scheduler lock
+        */
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+
+       /*
+        * These members are used by the executor and/or group,
+        * protected by executor lock
+        */
+       _mali_osk_notification_t *oom_notification;        /**< Notification sent back to userspace on OOM */
+
+       /*
+        * Set by executor/group on job completion, read by scheduler when
+        * returning job to user. Hold executor lock when setting,
+        * no lock needed when reading
+        */
        u32 heap_current_addr;                             /**< Holds the current HEAP address when the job has completed */
        u32 perf_counter_value0;                           /**< Value of performance counter 0 (to be returned to user space) */
        u32 perf_counter_value1;                           /**< Value of performance counter 1 (to be returned to user space) */
-       u32 pid;                                           /**< Process ID of submitting process */
-       u32 tid;                                           /**< Thread ID of submitting thread */
-       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
-       _mali_osk_notification_t *oom_notification;        /**< Notification sent back to userspace on OOM */
-       struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
-       struct mali_timeline_tracker *pp_tracker;          /**< Pointer to Timeline tracker for PP job that depends on this job. */
 };
 
 struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
@@ -50,127 +79,220 @@ void mali_gp_job_set_gp_counter_src1(u32 counter);
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (NULL == job) ? 0 : job->id;
 }
 
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+               u32 cache_order)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       job->cache_order = cache_order;
+}
+
 MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (NULL == job) ? 0 : job->cache_order;
 }
 
-MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.user_job_ptr;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.frame_builder_id;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.flush_id;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->pid;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->tid;
 }
 
-MALI_STATIC_INLINE u32 * mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.frame_registers;
 }
 
 MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->session;
 }
 
 MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
 }
 
 MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->heap_current_addr;
 }
 
 MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        job->heap_current_addr = heap_addr;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.perf_counter_flag;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.perf_counter_src0;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.perf_counter_src1;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->perf_counter_value0;
 }
 
 MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->perf_counter_value1;
 }
 
 MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        job->uargs.perf_counter_src0 = src;
 }
 
 MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        job->uargs.perf_counter_src1 = src;
 }
 
 MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        job->perf_counter_value0 = value;
 }
 
 MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        job->perf_counter_value1 = value;
 }
 
-/**
- * Returns MALI_TRUE if first job is after the second job, ordered by job ID.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+               _mali_osk_list_t *list)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+       _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
 {
-       /* A span is used to handle job ID wrapping. */
-       return (mali_gp_job_get_id(first) - mali_gp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN;
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+       notification = job->finished_notification;
+       job->finished_notification = NULL;
+
+       return notification;
 }
 
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+       struct mali_gp_job *job)
+{
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+       notification = job->oom_notification;
+       job->oom_notification = NULL;
+
+       return notification;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+       struct mali_gp_job *job,
+       _mali_osk_notification_t *notification)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+       job->oom_notification = notification;
+}
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+       struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return &(job->tracker);
+}
+
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr(
+       struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
 /**
  * Release reference on tracker for PP job that depends on this GP job.
  *
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.c
deleted file mode 100644 (file)
index 2ade2c4..0000000
+++ /dev/null
@@ -1,701 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#include "mali_gp_scheduler.h"
-#include "mali_kernel_common.h"
-#include "mali_osk.h"
-#include "mali_osk_list.h"
-#include "mali_scheduler.h"
-#include "mali_gp.h"
-#include "mali_gp_job.h"
-#include "mali_group.h"
-#include "mali_timeline.h"
-#include "mali_osk_profiling.h"
-#include "mali_kernel_utilization.h"
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-#include <linux/sched.h>
-#include <trace/events/gpu.h>
-#endif
-
-enum mali_gp_slot_state {
-       MALI_GP_SLOT_STATE_IDLE,
-       MALI_GP_SLOT_STATE_WORKING,
-       MALI_GP_SLOT_STATE_DISABLED,
-};
-
-/* A render slot is an entity which jobs can be scheduled onto */
-struct mali_gp_slot {
-       struct mali_group *group;
-       /*
-        * We keep track of the state here as well as in the group object
-        * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
-        */
-       enum mali_gp_slot_state state;
-       u32 returned_cookie;
-};
-
-static u32 gp_version = 0;
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue);      /* List of unscheduled jobs. */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(job_queue_high); /* List of unscheduled high priority jobs. */
-static struct mali_gp_slot slot;
-
-/* Variables to allow safe pausing of the scheduler */
-static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
-static u32 pause_count = 0;
-
-static mali_bool mali_gp_scheduler_is_suspended(void *data);
-static void mali_gp_scheduler_job_queued(void);
-static void mali_gp_scheduler_job_completed(void);
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-static _mali_osk_spinlock_irq_t *gp_scheduler_lock = NULL;
-#else
-static _mali_osk_spinlock_t *gp_scheduler_lock = NULL;
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-
-_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
-{
-       u32 num_groups;
-       u32 i;
-       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       gp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#else
-       gp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-       if (NULL == gp_scheduler_lock) {
-               ret = _MALI_OSK_ERR_NOMEM;
-               goto cleanup;
-       }
-
-       gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
-       if (NULL == gp_scheduler_working_wait_queue) {
-               ret = _MALI_OSK_ERR_NOMEM;
-               goto cleanup;
-       }
-
-       /* Find all the available GP cores */
-       num_groups = mali_group_get_glob_num_groups();
-       for (i = 0; i < num_groups; i++) {
-               struct mali_group *group = mali_group_get_glob_group(i);
-               MALI_DEBUG_ASSERT(NULL != group);
-               if (NULL != group) {
-                       struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
-                       if (NULL != gp_core) {
-                               if (0 == gp_version) {
-                                       /* Retrieve GP version */
-                                       gp_version = mali_gp_core_get_version(gp_core);
-                               }
-                               slot.group = group;
-                               slot.state = MALI_GP_SLOT_STATE_IDLE;
-                               break; /* There is only one GP, no point in looking for more */
-                       }
-               } else {
-                       ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
-                       goto cleanup;
-               }
-       }
-
-       return _MALI_OSK_ERR_OK;
-
-cleanup:
-       if (NULL != gp_scheduler_working_wait_queue) {
-               _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
-               gp_scheduler_working_wait_queue = NULL;
-       }
-
-       if (NULL != gp_scheduler_lock) {
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-               _mali_osk_spinlock_irq_term(gp_scheduler_lock);
-#else
-               _mali_osk_spinlock_term(gp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-               gp_scheduler_lock = NULL;
-       }
-
-       return ret;
-}
-
-void mali_gp_scheduler_terminate(void)
-{
-       MALI_DEBUG_ASSERT(   MALI_GP_SLOT_STATE_IDLE     == slot.state
-                            || MALI_GP_SLOT_STATE_DISABLED == slot.state);
-       MALI_DEBUG_ASSERT_POINTER(slot.group);
-       mali_group_delete(slot.group);
-
-       _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_term(gp_scheduler_lock);
-#else
-       _mali_osk_spinlock_term(gp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
-{
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_lock(gp_scheduler_lock);
-#else
-       _mali_osk_spinlock_lock(gp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-       MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
-}
-
-MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
-{
-       MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_unlock(gp_scheduler_lock);
-#else
-       _mali_osk_spinlock_unlock(gp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-#if defined(DEBUG)
-#define MALI_ASSERT_GP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock)
-#else
-#define MALI_ASSERT_GP_SCHEDULER_LOCKED() do {} while (0)
-#endif /* defined(DEBUG) */
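The wrappers above pick the spinlock flavor at compile time: an IRQ-safe variant when upper-half (interrupt context) scheduling may touch scheduler state, a plain one otherwise, so callers never need to know which they got. A hedged pthread sketch of the same build-time selection (UPPER_HALF_SCHEDULING is a stand-in flag, not the driver's macro, and a mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static void scheduler_lock(void)
{
#if defined(UPPER_HALF_SCHEDULING)
	/* The driver would use the IRQ-safe spinlock flavor here. */
	pthread_mutex_lock(&sched_lock);
#else
	/* The driver would use the plain spinlock flavor here. */
	pthread_mutex_lock(&sched_lock);
#endif
}

static void scheduler_unlock(void)
{
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	scheduler_lock();
	printf("scheduler state may only be modified here\n");
	scheduler_unlock();
	return 0;
}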
-
-/* Group and scheduler must be locked when entering this function.  Both will be unlocked before
- * exiting. */
-static void mali_gp_scheduler_schedule_internal_and_unlock(void)
-{
-       struct mali_gp_job *job = NULL;
-
-       MALI_DEBUG_ASSERT_LOCK_HELD(slot.group->lock);
-       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
-
-       if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state ||
-           (_mali_osk_list_empty(&job_queue) && _mali_osk_list_empty(&job_queue_high))) {
-               mali_gp_scheduler_unlock();
-               mali_group_unlock(slot.group);
-               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
-                                    pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-               trace_gpu_sched_switch(mali_gp_get_hw_core_desc(slot.group->gp_core), sched_clock(), 0, 0, 0);
-#endif
-               return; /* Nothing to do, so early out */
-       }
-
-       /* Get next job in queue */
-       if (!_mali_osk_list_empty(&job_queue_high)) {
-               job = _MALI_OSK_LIST_ENTRY(job_queue_high.next, struct mali_gp_job, list);
-       } else {
-               MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job_queue));
-               job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
-       }
-
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       /* Remove the job from queue */
-       _mali_osk_list_del(&job->list);
-
-       /* Mark slot as busy */
-       slot.state = MALI_GP_SLOT_STATE_WORKING;
-
-       mali_gp_scheduler_unlock();
-
-       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
-
-       mali_group_start_gp_job(slot.group, job);
-       mali_group_unlock(slot.group);
-}
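The queue pick above implements a strict two-level priority: the high-priority list is always drained before the normal one. A standalone sketch of just the pick, where plain singly linked lists stand in for _mali_osk_list_t:

#include <stddef.h>
#include <stdio.h>

struct job { int id; struct job *next; };

static struct job *pop(struct job **head)
{
	struct job *j = *head;
	if (j)
		*head = j->next;
	return j;
}

/* High-priority queue is always served first, mirroring the code above. */
static struct job *pick_next(struct job **high, struct job **normal)
{
	return *high ? pop(high) : pop(normal);
}

int main(void)
{
	struct job a = { 1, NULL }, b = { 2, NULL };
	struct job *normal = &a, *high = &b;
	struct job *j;

	while ((j = pick_next(&high, &normal)) != NULL)
		printf("run job %d\n", j->id); /* prints 2, then 1 */
	return 0;
}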
-
-void mali_gp_scheduler_schedule(void)
-{
-       mali_group_lock(slot.group);
-       mali_gp_scheduler_lock();
-
-       mali_gp_scheduler_schedule_internal_and_unlock();
-}
-
-static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
-{
-       _mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
-       _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
-       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
-       if (MALI_TRUE == success) {
-               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
-       } else {
-               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
-       }
-
-       jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
-       jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
-       jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
-
-       mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
-       job->finished_notification = NULL;
-
-       mali_gp_job_delete(job);
-       mali_gp_scheduler_job_completed();
-}
-
-/* Group must be locked when entering this function.  Will be unlocked before exiting. */
-void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
-{
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-       MALI_DEBUG_ASSERT(slot.group == group);
-
-       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
-
-       /* Release tracker. */
-       schedule_mask |= mali_timeline_tracker_release(&job->tracker);
-
-       /* Signal PP job. */
-       schedule_mask |= mali_gp_job_signal_pp_tracker(job, success);
-
-       mali_gp_scheduler_lock();
-
-       /* Mark slot as idle again */
-       slot.state = MALI_GP_SLOT_STATE_IDLE;
-
-       /* If paused, then this was the last job, so wake up sleeping workers */
-       if (pause_count > 0) {
-               _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
-       }
-
-       /* Schedule any queued GP jobs on this group. */
-       mali_gp_scheduler_schedule_internal_and_unlock();
-
-       /* GP is now scheduled, removing it from the mask. */
-       schedule_mask &= ~MALI_SCHEDULER_MASK_GP;
-
-       if (MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
-               /* Releasing the tracker activated other jobs that need scheduling. */
-               mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
-       }
-
-       /* Sends the job end message to user space and free the job object */
-       mali_gp_scheduler_return_job_to_user(job, success);
-}
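Completion above accumulates a bitmask of schedulers that gained runnable work (from the tracker release and from signalling the PP job), clears its own GP bit once the local reschedule has happened, and only then fans out to the other schedulers. A sketch of that flow with illustrative bit values:

#include <stdio.h>

#define MASK_EMPTY 0u
#define MASK_GP    (1u << 0)
#define MASK_PP    (1u << 1)

static void schedule_from_mask(unsigned mask)
{
	if (mask & MASK_PP)
		printf("kick PP scheduler\n");
	if (mask & MASK_GP)
		printf("kick GP scheduler\n");
}

int main(void)
{
	unsigned mask = MASK_EMPTY;

	mask |= MASK_GP | MASK_PP; /* tracker release woke both schedulers */
	mask &= ~MASK_GP;          /* GP was already rescheduled locally */
	if (mask != MASK_EMPTY)
		schedule_from_mask(mask); /* only PP is kicked */
	return 0;
}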
-
-void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
-{
-       _mali_uk_gp_job_suspended_s * jobres;
-       _mali_osk_notification_t * notification;
-
-       mali_gp_scheduler_lock();
-
-       notification = job->oom_notification;
-       job->oom_notification = NULL;
-       slot.returned_cookie = mali_gp_job_get_id(job);
-
-       jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
-       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
-       jobres->cookie = mali_gp_job_get_id(job);
-
-       mali_gp_scheduler_unlock();
-
-       mali_session_send_notification(mali_gp_job_get_session(job), notification);
-
-       /*
-        * If this function failed, then we could return the job to user space right away,
-        * but there is a job timer anyway that will do that eventually.
-        * This is not exactly a common case anyway.
-        */
-}
-
-void mali_gp_scheduler_suspend(void)
-{
-       mali_gp_scheduler_lock();
-       pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
-       mali_gp_scheduler_unlock();
-
-       _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended, NULL);
-}
-
-void mali_gp_scheduler_resume(void)
-{
-       mali_gp_scheduler_lock();
-       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
-       mali_gp_scheduler_unlock();
-       if (0 == pause_count) {
-               mali_gp_scheduler_schedule();
-       }
-}
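The suspend/resume pair above is a counted pause: suspend bumps pause_count so nothing new is scheduled, then blocks until the slot drains; the completion path wakes the waiter. A userspace sketch of the handshake, where a pthread condition variable stands in for the OSK wait queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static unsigned pause_count;
static int slot_busy;

static void scheduler_suspend(void)
{
	pthread_mutex_lock(&lock);
	pause_count++;            /* no new jobs will be scheduled */
	while (slot_busy)         /* wait for the running job to finish */
		pthread_cond_wait(&idle, &lock);
	pthread_mutex_unlock(&lock);
}

static void scheduler_resume(void)
{
	pthread_mutex_lock(&lock);
	pause_count--;
	pthread_mutex_unlock(&lock);
	/* if pause_count hit zero, scheduling would be kicked here */
}

static void job_done(void)
{
	pthread_mutex_lock(&lock);
	slot_busy = 0;
	pthread_cond_broadcast(&idle); /* wake any suspenders */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	scheduler_suspend();
	printf("suspended with pause_count=%u\n", pause_count);
	scheduler_resume();
	(void)job_done; /* would be called from the completion path */
	return 0;
}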
-
-mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job)
-{
-       mali_timeline_point point;
-
-       MALI_DEBUG_ASSERT_POINTER(session);
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       mali_gp_scheduler_job_queued();
-
-       /* Add job to Timeline system. */
-       point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_GP);
-
-       return point;
-}
-
-_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
-{
-       struct mali_session_data *session;
-       struct mali_gp_job *job;
-       mali_timeline_point point;
-       u32 __user *timeline_point_ptr = NULL;
-
-       MALI_DEBUG_ASSERT_POINTER(uargs);
-       MALI_DEBUG_ASSERT_POINTER(ctx);
-
-       session = (struct mali_session_data*)ctx;
-
-       job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), NULL);
-       if (NULL == job) {
-               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
-
-       point = mali_gp_scheduler_submit_job(session, job);
-
-       if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
-               /* Let user space know that something failed after the job was started. */
-               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
-       }
-
-       return _MALI_OSK_ERR_OK;
-}
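Note the ordering above: the job is submitted first, and only then is the timeline point copied back to user space; a failed copy is reported to the caller, but the submission is not unwound. A sketch of that contract (put_user_u32 is a hypothetical stand-in for _mali_osk_put_user()):

#include <stdio.h>

static int put_user_u32(unsigned int val, unsigned int *ptr)
{
	if (!ptr)
		return -1; /* stand-in for an EFAULT from the real copy */
	*ptr = val;
	return 0;
}

int main(void)
{
	unsigned int point_out = 0;
	unsigned int point = 42; /* pretend timeline point for the new job */

	if (put_user_u32(point, &point_out) != 0)
		fprintf(stderr, "job started, but write-back failed\n");
	printf("timeline point = %u\n", point_out);
	return 0;
}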
-
-_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
-{
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-       args->number_of_cores = 1;
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
-{
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-       args->version = gp_version;
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
-{
-       struct mali_session_data *session;
-       struct mali_gp_job *resumed_job;
-       _mali_osk_notification_t *new_notification = 0;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-
-       if (NULL == args->ctx) {
-               return _MALI_OSK_ERR_INVALID_ARGS;
-       }
-
-       session = (struct mali_session_data*)args->ctx;
-       if (NULL == session) {
-               return _MALI_OSK_ERR_FAULT;
-       }
-
-       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
-               new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
-
-               if (NULL == new_notification) {
-                       MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
-                       mali_group_lock(slot.group);
-                       mali_group_abort_gp_job(slot.group, args->cookie);
-                       mali_group_unlock(slot.group);
-                       return _MALI_OSK_ERR_FAULT;
-               }
-       }
-
-       mali_group_lock(slot.group);
-
-       if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
-               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
-
-               resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
-               if (NULL != resumed_job) {
-                       resumed_job->oom_notification = new_notification;
-                       mali_group_unlock(slot.group);
-                       return _MALI_OSK_ERR_OK;
-               } else {
-                       mali_group_unlock(slot.group);
-                       _mali_osk_notification_delete(new_notification);
-                       return _MALI_OSK_ERR_FAULT;
-               }
-       }
-
-       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
-       mali_group_abort_gp_job(slot.group, args->cookie);
-       mali_group_unlock(slot.group);
-       return _MALI_OSK_ERR_OK;
-}
-
-void mali_gp_scheduler_abort_session(struct mali_session_data *session)
-{
-       struct mali_gp_job *job, *tmp;
-       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
-
-       MALI_DEBUG_ASSERT_POINTER(session);
-       MALI_DEBUG_ASSERT(session->is_aborting);
-
-       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08X.\n", session));
-
-       mali_gp_scheduler_lock();
-
-       /* Find all jobs from the aborting session. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list) {
-               if (job->session == session) {
-                       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
-                       _mali_osk_list_move(&job->list, &removed_jobs);
-               }
-       }
-
-       /* Find all high priority jobs from the aborting session. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue_high, struct mali_gp_job, list) {
-               if (job->session == session) {
-                       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Removing job %u (0x%08X) from queue.\n", mali_gp_job_get_id(job), job));
-                       _mali_osk_list_move(&job->list, &removed_jobs);
-               }
-       }
-
-       mali_gp_scheduler_unlock();
-
-       /* Release and delete all found jobs from the aborting session. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &removed_jobs, struct mali_gp_job, list) {
-               mali_timeline_tracker_release(&job->tracker);
-               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
-               mali_gp_job_delete(job);
-               mali_gp_scheduler_job_completed();
-       }
-
-       /* Abort any running jobs from the session. */
-       mali_group_abort_session(slot.group, session);
-}
-
-static mali_bool mali_gp_scheduler_is_suspended(void *data)
-{
-       mali_bool ret;
-
-       /* This callback does not use the data pointer. */
-       MALI_IGNORE(data);
-
-       mali_gp_scheduler_lock();
-       ret = pause_count > 0 && (slot.state == MALI_GP_SLOT_STATE_IDLE || slot.state == MALI_GP_SLOT_STATE_DISABLED);
-       mali_gp_scheduler_unlock();
-
-       return ret;
-}
-
-
-#if MALI_STATE_TRACKING
-u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
-{
-       int n = 0;
-
-       n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
-       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
-       n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue_high) ? "empty" : "not empty");
-
-       n += mali_group_dump_state(slot.group, buf + n, size - n);
-       n += _mali_osk_snprintf(buf + n, size - n, "\n");
-
-       return n;
-}
-#endif
-
-void mali_gp_scheduler_reset_all_groups(void)
-{
-       if (NULL != slot.group) {
-               mali_group_lock(slot.group);
-               mali_group_reset(slot.group);
-               mali_group_unlock(slot.group);
-       }
-}
-
-void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
-{
-       if (NULL != slot.group) {
-               mali_group_zap_session(slot.group, session);
-       }
-}
-
-void mali_gp_scheduler_enable_group(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT(slot.group == group);
-       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: enabling gp group %p\n", group));
-
-       mali_group_lock(group);
-
-       if (MALI_GROUP_STATE_DISABLED != group->state) {
-               mali_group_unlock(group);
-               MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already enabled\n", group));
-               return;
-       }
-
-       mali_gp_scheduler_lock();
-
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
-       MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
-       slot.state = MALI_GP_SLOT_STATE_IDLE;
-       group->state = MALI_GROUP_STATE_IDLE;
-
-       mali_group_power_on_group(group);
-       mali_group_reset(group);
-
-       /* Pick up any jobs that might have been queued while the GP group was disabled. */
-       mali_gp_scheduler_schedule_internal_and_unlock();
-}
-
-void mali_gp_scheduler_disable_group(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT(slot.group == group);
-       MALI_DEBUG_PRINT(2, ("Mali GP scheduler: disabling gp group %p\n", group));
-
-       mali_gp_scheduler_suspend();
-       mali_group_lock(group);
-       mali_gp_scheduler_lock();
-
-       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE     == group->state
-                            || MALI_GROUP_STATE_DISABLED == group->state);
-
-       if (MALI_GROUP_STATE_DISABLED == group->state) {
-               MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_DISABLED == slot.state);
-               MALI_DEBUG_PRINT(2, ("Mali GP scheduler: gp group %p already disabled\n", group));
-       } else {
-               MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
-               slot.state = MALI_GP_SLOT_STATE_DISABLED;
-               group->state = MALI_GROUP_STATE_DISABLED;
-
-               mali_group_power_off_group(group, MALI_TRUE);
-       }
-
-       mali_gp_scheduler_unlock();
-       mali_group_unlock(group);
-       mali_gp_scheduler_resume();
-}
-
-static mali_scheduler_mask mali_gp_scheduler_queue_job(struct mali_gp_job *job)
-{
-       _mali_osk_list_t *queue = NULL;
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-       struct mali_gp_job *iter, *tmp;
-
-       MALI_DEBUG_ASSERT_POINTER(job);
-       MALI_DEBUG_ASSERT_POINTER(job->session);
-
-       MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
-
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
-
-       job->cache_order = mali_scheduler_get_new_cache_order();
-
-       /* Determine which queue the job should be added to. */
-       if (job->session->use_high_priority_job_queue) {
-               queue = &job_queue_high;
-       } else {
-               queue = &job_queue;
-       }
-
-       /* Find position in queue where job should be added. */
-       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_gp_job, list) {
-               if (mali_gp_job_is_after(job, iter)) {
-                       break;
-               }
-       }
-
-       /* Add job to queue. */
-       _mali_osk_list_add(&job->list, &iter->list);
-
-       /* Set schedule bitmask if the GP core is idle. */
-       if (MALI_GP_SLOT_STATE_IDLE == slot.state) {
-               schedule_mask |= MALI_SCHEDULER_MASK_GP;
-       }
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-       trace_gpu_job_enqueue(mali_gp_job_get_tid(job), mali_gp_job_get_id(job), "GP");
-#endif
-
-       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
-
-       return schedule_mask;
-}
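Queueing above walks the list from the tail and inserts the job behind the last entry it is ordered "after", which keeps equal-order jobs FIFO while letting later cache orders sort to the back. A compact array-based sketch (job_is_after() is a hypothetical comparator mirroring mali_gp_job_is_after()):

#include <stdio.h>
#include <string.h>

#define MAX_JOBS 8

struct job { unsigned order; };

/* Hypothetical comparator: is 'a' ordered after 'b'? */
static int job_is_after(const struct job *a, const struct job *b)
{
	return a->order >= b->order;
}

/* Walk from the tail, insert behind the last entry the job is after. */
static void queue_insert(struct job *q, unsigned *n, struct job job)
{
	unsigned pos = 0;
	for (unsigned i = *n; i > 0; i--) {
		if (job_is_after(&job, &q[i - 1])) {
			pos = i;
			break;
		}
	}
	memmove(&q[pos + 1], &q[pos], (*n - pos) * sizeof(*q));
	q[pos] = job;
	(*n)++;
}

int main(void)
{
	struct job q[MAX_JOBS];
	unsigned n = 0;

	queue_insert(q, &n, (struct job){ .order = 1 });
	queue_insert(q, &n, (struct job){ .order = 3 });
	queue_insert(q, &n, (struct job){ .order = 2 });
	for (unsigned i = 0; i < n; i++)
		printf("%u ", q[i].order); /* 1 2 3 */
	putchar('\n');
	return 0;
}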
-
-mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job)
-{
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
-       MALI_DEBUG_ASSERT_POINTER(job);
-       MALI_DEBUG_ASSERT_POINTER(job->session);
-
-       MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", mali_gp_job_get_id(job), job));
-
-       mali_gp_scheduler_lock();
-
-       if (unlikely(job->session->is_aborting)) {
-               /* Before checking if the session is aborting, the scheduler must be locked. */
-               MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
-
-               MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) activated while session is aborting.\n", mali_gp_job_get_id(job), job));
-
-               /* This job should not be on any list. */
-               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
-
-               mali_gp_scheduler_unlock();
-
-               /* Release tracker and delete job. */
-               mali_timeline_tracker_release(&job->tracker);
-               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
-               mali_gp_job_delete(job);
-               mali_gp_scheduler_job_completed();
-
-               /* Since we are aborting we ignore the scheduler mask. */
-               return MALI_SCHEDULER_MASK_EMPTY;
-       }
-
-       /* GP job is ready to run, queue it. */
-       schedule_mask = mali_gp_scheduler_queue_job(job);
-
-       mali_gp_scheduler_unlock();
-
-       return schedule_mask;
-}
-
-static void mali_gp_scheduler_job_queued(void)
-{
-       /* We hold a PM reference for every job we hold queued (and running) */
-       _mali_osk_pm_dev_ref_add();
-
-       if (mali_utilization_enabled()) {
-               /*
-                * We cheat a little bit by counting the GP as busy from the time a GP job is queued.
-                * This will be fine because we only lose the tiny idle gap between jobs, but
-                * we will instead get less utilization work to do (fewer locks taken).
-                */
-               mali_utilization_gp_start();
-       }
-}
-
-static void mali_gp_scheduler_job_completed(void)
-{
-       /* Release the PM reference we got in the mali_gp_scheduler_job_queued() function */
-       _mali_osk_pm_dev_ref_dec();
-
-       if (mali_utilization_enabled()) {
-               mali_utilization_gp_end();
-       }
-}
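The queued/completed pair above is plain reference counting on power management: each job holds one PM reference from enqueue until completion, so the device can only power down once the count reaches zero. A toy sketch of the invariant:

#include <stdio.h>

static unsigned pm_refs;

static void pm_ref_add(void) { pm_refs++; }
static void pm_ref_dec(void)
{
	if (--pm_refs == 0)
		printf("refcount zero: device may power down\n");
}

static void job_queued(void)    { pm_ref_add(); }
static void job_completed(void) { pm_ref_dec(); }

int main(void)
{
	job_queued();
	job_queued();
	job_completed();
	job_completed(); /* prints the power-down message */
	return 0;
}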
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_gp_scheduler.h
deleted file mode 100644 (file)
index 9a98f0a..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#ifndef __MALI_GP_SCHEDULER_H__
-#define __MALI_GP_SCHEDULER_H__
-
-#include "mali_osk.h"
-#include "mali_gp_job.h"
-#include "mali_group.h"
-
-_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
-void mali_gp_scheduler_terminate(void);
-
-void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
-void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
-u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
-
-void mali_gp_scheduler_suspend(void);
-void mali_gp_scheduler_resume(void);
-
-/**
- * @brief Abort all running and queued GP jobs from session.
- *
- * This function aborts all GP jobs from the specified session. Queued jobs are removed from the
- * queue, and jobs currently running on a core will be aborted.
- *
- * @param session Session that is aborting.
- */
-void mali_gp_scheduler_abort_session(struct mali_session_data *session);
-
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by the GP scheduler. This must be
- * called after the Mali HW has been powered on in order to reset the HW.
- */
-void mali_gp_scheduler_reset_all_groups(void);
-
-/**
- * @brief Zap TLB on all groups with \a session active
- *
- * The scheduler will zap the session on all groups it owns.
- */
-void mali_gp_scheduler_zap_all_active(struct mali_session_data *session);
-
-/**
- * @brief Re-enable a group that has been disabled with mali_gp_scheduler_disable_group
- *
- * If a Mali PMU is present, the group will be powered back on and added back
- * into the GP scheduler.
- *
- * @param group Pointer to the group to enable
- */
-void mali_gp_scheduler_enable_group(struct mali_group *group);
-
-/**
- * @brief Disable a group
- *
- * The group will be taken out of the GP scheduler and powered off, if a Mali
- * PMU is present.
- *
- * @param group Pointer to the group to disable
- */
-void mali_gp_scheduler_disable_group(struct mali_group *group);
-
-/**
- * @brief Used by the Timeline system to queue a GP job.
- *
- * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
- *
- * @param job The GP job that is being activated.
- *
- * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
- * call.
- */
-mali_scheduler_mask mali_gp_scheduler_activate_job(struct mali_gp_job *job);
-
-/**
- * @brief Schedule queued jobs on idle cores.
- */
-void mali_gp_scheduler_schedule(void);
-
-/**
- * @brief Submit a GP job to the GP scheduler.
- *
- * This will add the GP job to the Timeline system.
- *
- * @param session Session this job belongs to.
- * @param job GP job that will be submitted
- * @return Point on GP timeline for job.
- */
-mali_timeline_point mali_gp_scheduler_submit_job(struct mali_session_data *session, struct mali_gp_job *job);
-
-#endif /* __MALI_GP_SCHEDULER_H__ */
index 94ce122f7ac26fb9890085ddf95afbe99e47b802..359c56f8b6e706ed291cbff5fb3559defe78743f 100644 (file)
@@ -1,13 +1,12 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
-
 #include "mali_kernel_common.h"
 #include "mali_group.h"
 #include "mali_osk.h"
 #include "mali_osk_profiling.h"
 #include "mali_pm_domain.h"
 #include "mali_pm.h"
+#include "mali_executor.h"
+
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
 #include <linux/sched.h>
 #include <trace/events/gpu.h>
 #endif
 
-
-static void mali_group_bottom_half_mmu(void *data);
-static void mali_group_bottom_half_gp(void *data);
-static void mali_group_bottom_half_pp(void *data);
-
-static void mali_group_timeout(void *data);
-static void mali_group_reset_pp(struct mali_group *group);
-static void mali_group_reset_mmu(struct mali_group *group);
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
 
 #if defined(CONFIG_MALI400_PROFILING)
 static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
 
-/*
- * The group object is the most important object in the device driver,
- * and acts as the center of many HW operations.
- * The reason for this is that operations on the MMU will affect all
- * cores connected to this MMU (a group is defined by the MMU and the
- * cores which are connected to this).
- * The group lock is thus the most important lock, followed by the
- * GP and PP scheduler locks. They must be taken in the following
- * order:
- * GP/PP lock first, then group lock(s).
- */
-
 static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
 static u32 mali_global_num_groups = 0;
 
-/* timer related */
+/* SW timer for job execution */
 int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
 
 /* local helper functions */
-static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_recovery_reset(struct mali_group *group);
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
-
-static void mali_group_post_process_job_pp(struct mali_group *group);
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
-
-void mali_group_lock(struct mali_group *group)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_lock(group->lock);
-#else
-       _mali_osk_spinlock_lock(group->lock);
-#endif
-       MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
-}
-
-void mali_group_unlock(struct mali_group *group)
-{
-       MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_unlock(group->lock);
-#else
-       _mali_osk_spinlock_unlock(group->lock);
-#endif
-}
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
 
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-}
-#endif
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
 
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
 
-struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+                                    struct mali_dlbu_core *dlbu,
+                                    struct mali_bcast_unit *bcast,
+                                    u32 domain_index)
 {
        struct mali_group *group = NULL;
 
@@ -106,42 +66,24 @@ struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mal
        group = _mali_osk_calloc(1, sizeof(struct mali_group));
        if (NULL != group) {
                group->timeout_timer = _mali_osk_timer_init();
-
                if (NULL != group->timeout_timer) {
-                       _mali_osk_lock_order_t order;
                        _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
 
-                       if (NULL != dlbu) {
-                               order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
-                       } else {
-                               order = _MALI_OSK_LOCK_ORDER_GROUP;
-                       }
+                       group->l2_cache_core[0] = core;
+                       _mali_osk_list_init(&group->group_list);
+                       _mali_osk_list_init(&group->executor_list);
+                       _mali_osk_list_init(&group->pm_domain_list);
+                       group->bcast_core = bcast;
+                       group->dlbu_core = dlbu;
 
-#ifdef MALI_UPPER_HALF_SCHEDULING
-                       group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#else
-                       group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#endif
+                       /* register this object as a part of the correct power domain */
+                       if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+                               group->pm_domain = mali_pm_register_group(domain_index, group);
 
-                       if (NULL != group->lock) {
-                               group->l2_cache_core[0] = core;
-                               group->session = NULL;
-                               group->power_is_on = MALI_TRUE;
-                               group->state = MALI_GROUP_STATE_IDLE;
-                               _mali_osk_list_init(&group->group_list);
-                               _mali_osk_list_init(&group->pp_scheduler_list);
-                               group->parent_group = NULL;
-                               group->l2_cache_core_ref_count[0] = 0;
-                               group->l2_cache_core_ref_count[1] = 0;
-                               group->bcast_core = bcast;
-                               group->dlbu_core = dlbu;
-
-                               mali_global_groups[mali_global_num_groups] = group;
-                               mali_global_num_groups++;
-
-                               return group;
-                       }
-                       _mali_osk_timer_term(group->timeout_timer);
+                       mali_global_groups[mali_global_num_groups] = group;
+                       mali_global_num_groups++;
+
+                       return group;
                }
                _mali_osk_free(group);
        }
@@ -149,78 +91,15 @@ struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mal
        return NULL;
 }
 
-_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core)
-{
-       /* This group object now owns the MMU core object */
-       group->mmu = mmu_core;
-       group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
-       if (NULL == group->bottom_half_work_mmu) {
-               return _MALI_OSK_ERR_FAULT;
-       }
-       return _MALI_OSK_ERR_OK;
-}
-
-void mali_group_remove_mmu_core(struct mali_group *group)
-{
-       /* This group object no longer owns the MMU core object */
-       group->mmu = NULL;
-       if (NULL != group->bottom_half_work_mmu) {
-               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
-       }
-}
-
-_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
-{
-       /* This group object now owns the GP core object */
-       group->gp_core = gp_core;
-       group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
-       if (NULL == group->bottom_half_work_gp) {
-               return _MALI_OSK_ERR_FAULT;
-       }
-       return _MALI_OSK_ERR_OK;
-}
-
-void mali_group_remove_gp_core(struct mali_group *group)
-{
-       /* This group object no longer owns the GP core object */
-       group->gp_core = NULL;
-       if (NULL != group->bottom_half_work_gp) {
-               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
-       }
-}
-
-_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
-{
-       /* This group object now owns the PP core object */
-       group->pp_core = pp_core;
-       group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
-       if (NULL == group->bottom_half_work_pp) {
-               return _MALI_OSK_ERR_FAULT;
-       }
-       return _MALI_OSK_ERR_OK;
-}
-
-void mali_group_remove_pp_core(struct mali_group *group)
-{
-       /* This group object no longer owns the PP core object */
-       group->pp_core = NULL;
-       if (NULL != group->bottom_half_work_pp) {
-               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
-       }
-}
-
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
-{
-       group->pm_domain = domain;
-}
-
 void mali_group_delete(struct mali_group *group)
 {
        u32 i;
 
-       MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+       MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+                            mali_group_core_description(group)));
 
        MALI_DEBUG_ASSERT(NULL == group->parent_group);
+       MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
 
        /* Delete the resources that this group owns */
        if (NULL != group->gp_core) {
@@ -285,221 +164,630 @@ void mali_group_delete(struct mali_group *group)
                _mali_osk_wq_delete_work(group->bottom_half_work_pp);
        }
 
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_term(group->lock);
-#else
-       _mali_osk_spinlock_term(group->lock);
-#endif
        _mali_osk_free(group);
 }
 
-MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
 {
-       u32 i;
-       struct mali_group *group;
-       struct mali_group *temp;
+       /* This group object now owns the MMU core object */
+       group->mmu = mmu_core;
+       group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+       if (NULL == group->bottom_half_work_mmu) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
 
-       MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
-       MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
-       MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+       /* This group object no longer owns the MMU core object */
+       group->mmu = NULL;
+       if (NULL != group->bottom_half_work_mmu) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+       }
+}
 
-       i = 0;
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
-               MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
-               i++;
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+       /* This group object now owns the GP core object */
+       group->gp_core = gp_core;
+       group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+       if (NULL == group->bottom_half_work_gp) {
+               return _MALI_OSK_ERR_FAULT;
        }
-})
+       return _MALI_OSK_ERR_OK;
+}
 
-/**
- * @brief Add child group to virtual group parent
- *
- * Before calling this function, child must have its state set to JOINING_VIRTUAL
- * to ensure it's not touched during the transition period. When this function returns,
- * child's state will be IN_VIRTUAL.
- */
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+void mali_group_remove_gp_core(struct mali_group *group)
 {
-       mali_bool found;
-       u32 i;
-       struct mali_session_data *child_session;
+       /* This group object no longer owns the GP core object */
+       group->gp_core = NULL;
+       if (NULL != group->bottom_half_work_gp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+       }
+}
 
-       MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+       /* This group object now owns the PP core object */
+       group->pp_core = pp_core;
+       group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+       if (NULL == group->bottom_half_work_pp) {
+               return _MALI_OSK_ERR_FAULT;
+       }
+       return _MALI_OSK_ERR_OK;
+}
 
-       MALI_ASSERT_GROUP_LOCKED(parent);
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+       /* This group object no longer owns the PP core object */
+       group->pp_core = NULL;
+       if (NULL != group->bottom_half_work_pp) {
+               _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+       }
+}
 
-       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
-       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
-       MALI_DEBUG_ASSERT(NULL == child->parent_group);
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
+enum mali_group_state mali_group_activate(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-       _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+       MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+                            mali_group_core_description(group)));
 
-       child->state = MALI_GROUP_STATE_IN_VIRTUAL;
-       child->parent_group = parent;
+       if (MALI_GROUP_STATE_INACTIVE == group->state) {
+               /* Group is inactive, get PM refs in order to power up */
 
-       MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+               /*
+                * We'll take a maximum of 2 power domain references per group,
+                * one for the group itself, and one for its L2 cache.
+                */
+               struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+               struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+               u32 num_domains = 0;
+               mali_bool all_groups_on;
 
-       MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
-       MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+               /* Deal with child groups first */
+               if (mali_group_is_virtual(group)) {
+                       /*
+                        * The virtual group might have 0, 1 or 2 L2s in
+                        * its l2_cache_core array, but we ignore these and
+                        * let the child groups take the needed L2 cache ref
+                        * on behalf of the virtual group.
+                        * In other words: the L2 refs are taken in tandem with
+                        * the physical group that the L2 is attached to.
+                        */
+                       struct mali_group *child;
+                       struct mali_group *temp;
 
-       /* Keep track of the L2 cache cores of child groups */
-       found = MALI_FALSE;
-       for (i = 0; i < 2; i++) {
-               if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
-                       MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
-                       parent->l2_cache_core_ref_count[i]++;
-                       found = MALI_TRUE;
+                       /*
+                        * Child group is inactive, get PM
+                        * refs in order to power up.
+                        */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                                                   &group->group_list,
+                                                   struct mali_group, group_list) {
+                               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+                                                 == child->state);
+
+                               child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+                               MALI_DEBUG_ASSERT_POINTER(
+                                       child->pm_domain);
+                               domains[num_domains] = child->pm_domain;
+                               groups[num_domains] = child;
+                               num_domains++;
+
+                               /*
+                                * Take L2 domain ref for child group.
+                                */
+                               MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+                                                 > num_domains);
+                               domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                              child->l2_cache_core[0]);
+                               groups[num_domains] = NULL;
+                               MALI_DEBUG_ASSERT(NULL ==
+                                                 child->l2_cache_core[1]);
+                               num_domains++;
+                       }
+               } else {
+                       /* Take L2 domain ref for physical groups. */
+                       MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                         num_domains);
+
+                       domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                      group->l2_cache_core[0]);
+                       groups[num_domains] = NULL;
+                       MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+                       num_domains++;
                }
-       }
-
-       if (!found) {
-               /* First time we see this L2 cache, add it to our list */
-               i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
 
-               MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
-
-               MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
-
-               parent->l2_cache_core[i] = child->l2_cache_core[0];
-               parent->l2_cache_core_ref_count[i]++;
-       }
+               /* Do the group itself last (its dependencies first) */
 
-       /* Update Broadcast Unit and DLBU */
-       mali_bcast_add_group(parent->bcast_core, child);
-       mali_dlbu_add_group(parent->dlbu_core, child);
+               group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
 
-       child_session = child->session;
-       child->session = NULL;
+               MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+               domains[num_domains] = group->pm_domain;
+               groups[num_domains] = group;
+               num_domains++;
 
-       /* Above this comment, only software state is updated and the HW is not
-        * touched. Now, check if Mali is powered and skip the rest if it isn't
-        * powered.
-        */
+               all_groups_on = mali_pm_get_domain_refs(domains, groups,
+                                                       num_domains);
 
-       if (!update_hw) {
-               MALI_DEBUG_CODE(mali_group_print_virtual(parent));
-               return;
-       }
+               /*
+                * Complete activation for group, include
+                * virtual group or physical group.
+                */
+               if (MALI_TRUE == all_groups_on) {
 
-       /* Update MMU */
-       if (parent->session == child_session) {
-               mali_mmu_zap_tlb(child->mmu);
-       } else {
-               if (NULL == parent->session) {
-                       mali_mmu_activate_empty_page_directory(child->mmu);
-               } else {
-                       mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+                       mali_group_set_active(group);
                }
+       } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+               /* Already active */
+               MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       } else {
+               /*
+                * Activation already pending, group->power_is_on could
+                * be both true or false. We need to wait for power up
+                * notification anyway.
+                */
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+                                 == group->state);
        }
 
-       /* Update HW only if power is on */
-       mali_bcast_reset(parent->bcast_core);
-       mali_dlbu_update_mask(parent->dlbu_core);
-
-       /* Start job on child when parent is active */
-       if (NULL != parent->pp_running_job) {
-               struct mali_pp_job *job = parent->pp_running_job;
-               MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
-                                    child, mali_pp_job_get_id(job), parent));
-               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
-               mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
-
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
-                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
-                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
-
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
-                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
-       }
+       MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+                            mali_group_core_description(group),
+                            MALI_GROUP_STATE_ACTIVE == group->state ?
+                            "ACTIVE" : "PENDING"));
 
-       MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+       return group->state;
 }
 
-/**
- * @brief Remove child group from virtual group parent
- *
- * After the child is removed, its state will be LEAVING_VIRTUAL and must be set
- * to IDLE before it can be used.
- */
-void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+mali_bool mali_group_set_active(struct mali_group *group)
 {
-       u32 i;
-
-       MALI_ASSERT_GROUP_LOCKED(parent);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
 
-       MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+       MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+                            mali_group_core_description(group)));
 
-       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
-       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
-       MALI_DEBUG_ASSERT(parent == child->parent_group);
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
-       /* Removing groups while running is not yet supported. */
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *child;
+               struct mali_group *temp;
 
-       mali_group_lock(child);
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+                                           struct mali_group, group_list) {
+                       if (MALI_TRUE != child->power_is_on) {
+                               return MALI_FALSE;
+                       }
 
-       /* Update Broadcast Unit and DLBU */
-       mali_bcast_remove_group(parent->bcast_core, child);
-       mali_dlbu_remove_group(parent->dlbu_core, child);
+                       child->state = MALI_GROUP_STATE_ACTIVE;
+               }
 
-       /* Update HW only if power is on */
-       if (mali_pm_is_power_on()) {
-               mali_bcast_reset(parent->bcast_core);
-               mali_dlbu_update_mask(parent->dlbu_core);
+               mali_group_reset(group);
        }
 
-       _mali_osk_list_delinit(&child->group_list);
+       /* Go to ACTIVE state */
+       group->state = MALI_GROUP_STATE_ACTIVE;
 
-       child->session = parent->session;
-       child->parent_group = NULL;
-       child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+       return MALI_TRUE;
+}
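Activation above is a three-state machine: INACTIVE moves to ACTIVATION_PENDING when PM references are taken, and to ACTIVE once power is confirmed on, either immediately or later via a power-up notification. A sketch of those transitions (enum and function names are illustrative, not the driver's types):

#include <assert.h>
#include <stdio.h>

enum group_state { INACTIVE, ACTIVATION_PENDING, ACTIVE };

struct group { enum group_state state; int power_is_on; };

static void group_activate(struct group *g)
{
	if (g->state == INACTIVE) {
		g->state = ACTIVATION_PENDING; /* PM refs taken here */
		if (g->power_is_on)
			g->state = ACTIVE;     /* all domains already on */
	}
	/* ACTIVE or ACTIVATION_PENDING: nothing to do but wait */
}

static void group_power_up_notification(struct group *g)
{
	assert(g->state == ACTIVATION_PENDING);
	g->power_is_on = 1;
	g->state = ACTIVE;
}

int main(void)
{
	struct group g = { INACTIVE, 0 };

	group_activate(&g);
	printf("state after activate: %d (pending)\n", g.state);
	group_power_up_notification(&g);
	printf("state after power-up: %d (active)\n", g.state);
	return 0;
}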
 
-       /* Keep track of the L2 cache cores of child groups */
-       i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+       struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+       u32 num_domains = 0;
+       mali_bool power_down = MALI_FALSE;
 
-       MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
 
-       parent->l2_cache_core_ref_count[i]--;
+       MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+                            mali_group_core_description(group)));
 
-       if (parent->l2_cache_core_ref_count[i] == 0) {
-               parent->l2_cache_core[i] = NULL;
-       }
+       group->state = MALI_GROUP_STATE_INACTIVE;
 
-       MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+       MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+       domains[num_domains] = group->pm_domain;
+       num_domains++;
 
-       mali_group_unlock(child);
+       if (mali_group_is_virtual(group)) {
+               /* Release refs for all child groups */
+               struct mali_group *child;
+               struct mali_group *temp;
+
+               _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+                                           &group->group_list,
+                                           struct mali_group, group_list) {
+                       child->state = MALI_GROUP_STATE_INACTIVE;
+
+                       MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+                       domains[num_domains] = child->pm_domain;
+                       num_domains++;
+
+                       /* Release L2 cache domain for child groups */
+                       MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                         num_domains);
+                       domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                                      child->l2_cache_core[0]);
+                       MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+                       num_domains++;
+               }
+
+               /*
+                * Must do mali_group_power_down() steps right here for
+                * virtual group, because virtual group itself is likely to
+                * stay powered on, however child groups are now very likely
+                * to be powered off (and thus lose their state).
+                */
+
+               mali_group_clear_session(group);
+               /*
+                * Disable the broadcast unit (clear its mask).
+                * This is needed in case the GPU isn't actually
+                * powered down at this point and groups are
+                * removed from an inactive virtual group.
+                * If not, then the broadcast unit will intercept
+                * their interrupts!
+                */
+               mali_bcast_disable(group->bcast_core);
+       } else {
+               /* Release L2 cache domain for physical groups */
+               MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+                                 num_domains);
+               domains[num_domains] = mali_l2_cache_get_pm_domain(
+                                              group->l2_cache_core[0]);
+               MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+               num_domains++;
+       }
+
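+       /*
+        * Release all collected domain references; the result tells the
+        * caller whether this triggers a power down.
+        */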
+       power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+       return power_down;
 }
 
-struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+void mali_group_power_up(struct mali_group *group)
 {
-       struct mali_group *child;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+                            mali_group_core_description(group)));
+
+       group->power_is_on = MALI_TRUE;
+
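+       /*
+        * Reset stand-alone physical groups here; they lost their HW
+        * state while powered down. Groups inside a virtual group are
+        * not reset individually.
+        */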
+       if (MALI_FALSE == mali_group_is_virtual(group)
+           && MALI_FALSE == mali_group_is_in_virtual(group)) {
+               mali_group_reset(group);
+       }
+
+       /*
+        * When we acquire a single physical group from the virtual group,
+        * we must remove the bcast/DLBU mask from the virtual group and
+        * reset the bcast and DLBU cores, even though some PP cores in
+        * the virtual group may not be powered on.
+        */
+       if (MALI_TRUE == mali_group_is_virtual(group)) {
+               mali_bcast_reset(group->bcast_core);
+               mali_dlbu_update_mask(group->dlbu_core);
+       }
+}
+
+void mali_group_power_down(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+                            mali_group_core_description(group)));
+
+       group->power_is_on = MALI_FALSE;
+
+       if (mali_group_is_virtual(group)) {
+               /*
+                * What this function does for physical groups has
+                * already been done in mali_group_deactivate() for
+                * virtual groups.
+                */
+               MALI_DEBUG_ASSERT(NULL == group->session);
+       } else {
+               mali_group_clear_session(group);
+       }
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+       u32 i;
+       struct mali_group *group;
+       struct mali_group *temp;
+
+       MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+                            mali_group_core_description(vgroup),
+                            vgroup));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+       MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+       i = 0;
+       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+               MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+                                    i, mali_group_core_description(group),
+                                    group, group->l2_cache_core[0]));
+               i++;
+       }
+})
+
+static void mali_group_dump_core_status(struct mali_group *group)
+{
+       u32 i;
+
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
+
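+       /* Dump raw core registers, four 32-bit words per line */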
+       if (NULL != group->gp_core) {
+               MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
+
+               for (i = 0; i < 0xA8; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
+               }
+
+       } else {
+               MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
+
+               for (i = 0; i < 0x5c; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+               }
+
+               /* Skip the write-back unit registers; dump the management registers */
+               for (i = 0x1000; i < 0x1068; i += 0x10) {
+                       MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+                                   mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+               }
+       }
+
+       MALI_PRINT(("Dump Group MMU\n"));
+       for (i = 0; i < 0x24; i += 0x10) {
+               MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
+                           mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
+       }
+}
+
+
+/**
+ * @brief Dump group status
+ */
+void mali_group_dump_status(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+       if (mali_group_is_virtual(group)) {
+               struct mali_group *group_c;
+               struct mali_group *temp;
+               _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
+                       mali_group_dump_core_status(group_c);
+               }
+       } else {
+               mali_group_dump_core_status(group);
+       }
+}
+
+/**
+ * @brief Add child group to virtual group parent
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+       mali_bool found;
+       u32 i;
+
+       MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+                            mali_group_core_description(child),
+                            mali_group_core_description(parent)));
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(NULL == child->parent_group);
+
+       _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+       child->parent_group = parent;
+
+       MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+       MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+       MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+       /* Keep track of the L2 cache cores of child groups */
+       found = MALI_FALSE;
+       for (i = 0; i < 2; i++) {
+               if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+                       MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+                       parent->l2_cache_core_ref_count[i]++;
+                       found = MALI_TRUE;
+               }
+       }
+
+       if (!found) {
+               /* First time we see this L2 cache, add it to our list */
+               i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+               MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+               MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+               parent->l2_cache_core[i] = child->l2_cache_core[0];
+               parent->l2_cache_core_ref_count[i]++;
+       }
+
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_add_group(parent->bcast_core, child);
+       mali_dlbu_add_group(parent->dlbu_core, child);
+
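+       /* Update HW only if power is on */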
+       if (MALI_TRUE == parent->power_is_on) {
+               mali_bcast_reset(parent->bcast_core);
+               mali_dlbu_update_mask(parent->dlbu_core);
+       }
+
+       if (MALI_TRUE == child->power_is_on) {
+               if (NULL == parent->session) {
+                       if (NULL != child->session) {
+                               /*
+                                * Parent has no session, so clear the
+                                * child's session as well.
+                                */
+                               mali_mmu_activate_empty_page_directory(child->mmu);
+                       }
+               } else {
+                       if (parent->session == child->session) {
+                               /* Child already has the same session as the
+                                * parent, so a simple TLB zap is enough.
+                                */
+                               mali_mmu_zap_tlb(child->mmu);
+                       } else {
+                               /*
+                                * Parent has a different session, so we must
+                                * switch to that session's page table
+                                */
+                               mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+                       }
+
+                       /* It is the parent which keeps the session from now on */
+                       child->session = NULL;
+               }
+       } else {
+               /* should have been cleared when child was powered down */
+               MALI_DEBUG_ASSERT(NULL == child->session);
+       }
+
+       /* Start job on child when parent is active */
+       if (NULL != parent->pp_running_job) {
+               struct mali_pp_job *job = parent->pp_running_job;
+
+               MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+                                    child, mali_pp_job_get_id(job), parent));
+
+               /* Only allowed to add active child to an active parent */
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+               mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+               trace_gpu_sched_switch(
+                       mali_pp_core_description(child->pp_core),
+                       sched_clock(), mali_pp_job_get_tid(job),
+                       0, mali_pp_job_get_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+               trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+       }
 
-       MALI_ASSERT_GROUP_LOCKED(parent);
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+}
 
+/**
+ * @brief Remove child group from virtual group parent
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+       u32 i;
+
+       MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+                            mali_group_core_description(child),
+                            mali_group_core_description(parent)));
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
-       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
+       MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+       MALI_DEBUG_ASSERT(parent == child->parent_group);
 
-       child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+       /* Update Broadcast Unit and DLBU */
+       mali_bcast_remove_group(parent->bcast_core, child);
+       mali_dlbu_remove_group(parent->dlbu_core, child);
 
-       mali_group_remove_group(parent, child);
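+       /* Update HW only if power is on */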
+       if (MALI_TRUE == parent->power_is_on) {
+               mali_bcast_reset(parent->bcast_core);
+               mali_dlbu_update_mask(parent->dlbu_core);
+       }
+
+       child->session = parent->session;
+       child->parent_group = NULL;
+
+       _mali_osk_list_delinit(&child->group_list);
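+       /*
+        * The departing child keeps the session; if it was the last
+        * child, the parent no longer holds one.
+        */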
+       if (_mali_osk_list_empty(&parent->group_list)) {
+               parent->session = NULL;
+       }
+
+       /* Keep track of the L2 cache cores of child groups */
+       i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+       MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+       parent->l2_cache_core_ref_count[i]--;
+       if (parent->l2_cache_core_ref_count[i] == 0) {
+               parent->l2_cache_core[i] = NULL;
+       }
+
+       MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+       struct mali_group *child = NULL;
+
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+
+       if (!_mali_osk_list_empty(&parent->group_list)) {
+               child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+               mali_group_remove_group(parent, child);
+       }
+
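+       /*
+        * A child taken from an inactive virtual group has not been
+        * reset since power up, so reset it now while its power is on.
+        */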
+       if (NULL != child) {
+               if (MALI_GROUP_STATE_ACTIVE != parent->state
+                   && MALI_TRUE == child->power_is_on) {
+                       mali_group_reset(child);
+               }
+       }
 
        return child;
 }
 
 void mali_group_reset(struct mali_group *group)
 {
-       /*
-        * This function should not be used to abort jobs,
-        * currently only called during insmod and PM resume
-        */
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
        MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+       MALI_DEBUG_ASSERT(NULL == group->session);
 
-       group->session = NULL;
+       MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+                            mali_group_core_description(group)));
 
        if (NULL != group->dlbu_core) {
                mali_dlbu_reset(group->dlbu_core);
@@ -509,77 +797,89 @@ void mali_group_reset(struct mali_group *group)
                mali_bcast_reset(group->bcast_core);
        }
 
-       if (NULL != group->mmu) {
-               mali_group_reset_mmu(group);
-       }
+       MALI_DEBUG_ASSERT(NULL != group->mmu);
+       mali_group_reset_mmu(group);
 
        if (NULL != group->gp_core) {
+               MALI_DEBUG_ASSERT(NULL == group->pp_core);
                mali_gp_reset(group->gp_core);
-       }
-
-       if (NULL != group->pp_core) {
+       } else {
+               MALI_DEBUG_ASSERT(NULL != group->pp_core);
                mali_group_reset_pp(group);
        }
 }
 
-struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
-{
-       return group->gp_core;
-}
-
-struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
-{
-       return group->pp_core;
-}
-
 void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
 {
        struct mali_session_data *session;
 
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+                            job,
+                            mali_group_core_description(group)));
 
        session = mali_gp_job_get_session(job);
 
-       if (NULL != group->l2_cache_core[0]) {
-               mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
-       }
+       MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+       mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
 
        mali_group_activate_page_directory(group, session);
 
        mali_gp_job_start(group->gp_core, job);
 
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
-                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                                     mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                     mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-                                     mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+       trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */,  0 /* core */,
+                              mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
 #if defined(CONFIG_MALI400_PROFILING)
        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
                mali_group_report_l2_cache_counters_per_core(group, 0);
+       }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
 
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-       trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
-                              mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+       trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+                              sched_clock(), mali_gp_job_get_tid(job),
+                              0, mali_gp_job_get_id(job));
 #endif
 
        group->gp_running_job = job;
-       group->state = MALI_GROUP_STATE_WORKING;
+       group->is_working = MALI_TRUE;
 
-       /* Setup the timeout timer value and save the job id for the job running on the gp core */
+       /* Setup SW timer and record start time */
+       group->start_time = _mali_osk_time_tickcount();
        _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+       MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+                            job,
+                            mali_group_core_description(group),
+                            group->start_time));
 }
 
+/* Sets up all registers except the frame renderer list address and the
+ * fragment shader stack address, so the caller must program those two
+ * registers before calling this function.
+ */
 void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
 {
        struct mali_session_data *session;
 
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            mali_group_core_description(group)));
 
        session = mali_pp_job_get_session(job);
 
@@ -598,7 +898,7 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                struct mali_group *temp;
                u32 core_num = 0;
 
-               MALI_DEBUG_ASSERT( mali_pp_job_is_virtual(job));
+               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
 
                /* Configure DLBU for the job */
                mali_dlbu_config_job(group->dlbu_core, job);
@@ -609,11 +909,7 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                        core_num++;
                }
 
-               /* Try to use DMA unit to start job, fallback to writing directly to the core */
-               MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
-               if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
-                       mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
-               }
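+               /*
+                * The DMA-assisted start path is gone together with the
+                * DMA unit, so always program the core registers directly.
+                */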
+               mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
        } else {
                mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
        }
@@ -625,16 +921,22 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                struct mali_group *temp;
 
                _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
-                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
-                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                                                     mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
-
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
-                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                                                     mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                                     mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                     mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                              mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
                }
+
 #if defined(CONFIG_MALI400_PROFILING)
                if (0 != group->l2_cache_core_ref_count[0]) {
                        if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
@@ -649,16 +951,23 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                        }
                }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
        } else { /* group is physical - call profiling events for physical cores */
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|
-                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
-                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
-                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
-
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|
-                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
-                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+                                             mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                             mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+               trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                                      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
 #if defined(CONFIG_MALI400_PROFILING)
                if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
                    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
@@ -666,39 +975,48 @@ void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job,
                }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
        }
+
 #if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-       trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+       trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+                              sched_clock(), mali_pp_job_get_tid(job),
+                              0, mali_pp_job_get_id(job));
 #endif
+
        group->pp_running_job = job;
        group->pp_running_sub_job = sub_job;
-       group->state = MALI_GROUP_STATE_WORKING;
+       group->is_working = MALI_TRUE;
 
-       /* Setup the timeout timer value and save the job id for the job running on the pp core */
+       /* Setup SW timer and record start time */
+       group->start_time = _mali_osk_time_tickcount();
        _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+       MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+                            job, sub_job + 1,
+                            mali_pp_job_get_sub_job_count(job),
+                            mali_group_core_description(group),
+                            group->start_time));
 }
 
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
 {
-       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-       if (group->state != MALI_GROUP_STATE_OOM ||
-           mali_gp_job_get_id(group->gp_running_job) != job_id) {
-               return NULL; /* Illegal request or job has already been aborted */
-       }
-
-       if (NULL != group->l2_cache_core[0]) {
-               mali_l2_cache_invalidate(group->l2_cache_core[0]);
-       }
+       MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+       mali_l2_cache_invalidate(group->l2_cache_core[0]);
 
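+       /*
+        * Make sure no stale translations cover the grown heap before
+        * the GP core resumes.
+        */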
        mali_mmu_zap_tlb_without_stall(group->mmu);
 
        mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
 
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     0, 0, 0, 0, 0);
 
-       group->state = MALI_GROUP_STATE_WORKING;
-
-       return group->gp_running_job;
+#if defined(CONFIG_MALI400_PROFILING)
+       trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */,  0 /* core */,
+                              mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
 }
 
 static void mali_group_reset_mmu(struct mali_group *group)
@@ -707,17 +1025,14 @@ static void mali_group_reset_mmu(struct mali_group *group)
        struct mali_group *temp;
        _mali_osk_errcode_t err;
 
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
        if (!mali_group_is_virtual(group)) {
                /* This is a physical group or an idle virtual group -- simply wait for
                 * the reset to complete. */
                err = mali_mmu_reset(group->mmu);
                MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
        } else { /* virtual group */
-               err = mali_mmu_reset(group->mmu);
-               if (_MALI_OSK_ERR_OK == err) {
-                       return;
-               }
-
                /* Loop through all members of this virtual group and wait
                 * until they are done resetting.
                 */
@@ -733,13 +1048,15 @@ static void mali_group_reset_pp(struct mali_group *group)
        struct mali_group *child;
        struct mali_group *temp;
 
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
        mali_pp_reset_async(group->pp_core);
 
        if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
                /* This is a physical group or an idle virtual group -- simply wait for
                 * the reset to complete. */
                mali_pp_reset_wait(group->pp_core);
-       } else { /* virtual group */
+       } else {
                /* Loop through all members of this virtual group and wait until they
                 * are done resetting.
                 */
@@ -749,18 +1066,95 @@ static void mali_group_reset_pp(struct mali_group *group)
        }
 }
 
-/* Group must be locked when entering this function.  Will be unlocked before exiting. */
-static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
 {
        struct mali_pp_job *pp_job_to_return;
-       u32 pp_sub_job_to_return;
 
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        MALI_DEBUG_ASSERT_POINTER(group);
        MALI_DEBUG_ASSERT_POINTER(group->pp_core);
        MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
-       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT_POINTER(sub_job);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+       /* Stop/clear the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       if (NULL != group->pp_running_job) {
+
+               /* Deal with HW counters and profiling */
+
+               if (MALI_TRUE == mali_group_is_virtual(group)) {
+                       struct mali_group *child;
+                       struct mali_group *temp;
 
-       mali_group_post_process_job_pp(group);
+                       /* update performance counters from each physical pp core within this virtual group */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+                       }
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       /* send profiling data per physical core */
+                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+                                                             mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+                                                             mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                             0, 0);
+
+                               trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                                                      0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+                                                      mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                                                      mali_pp_job_get_flush_id(group->pp_running_job));
+                       }
+                       if (0 != group->l2_cache_core_ref_count[0]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                               }
+                       }
+                       if (0 != group->l2_cache_core_ref_count[1]) {
+                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+                               }
+                       }
+
+#endif
+               } else {
+                       /* update performance counters for a physical group's pp core */
+                       mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+                                                     mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+                                                     mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+                                                     0, 0);
+
+                       trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+                                              0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+                                              mali_pp_job_get_frame_builder_id(group->pp_running_job),
+                                              mali_pp_job_get_flush_id(group->pp_running_job));
+
+                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+                       }
+#endif
+               }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+               trace_gpu_sched_switch(
+                       mali_pp_core_description(group->pp_core),
+                       sched_clock(), 0, 0, 0);
+#endif
+
+       }
 
        if (success) {
                /* Only do soft reset for successful jobs, a full recovery
@@ -769,9 +1163,9 @@ static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_boo
        }
 
        pp_job_to_return = group->pp_running_job;
-       pp_sub_job_to_return = group->pp_running_sub_job;
-       group->state = MALI_GROUP_STATE_IDLE;
        group->pp_running_job = NULL;
+       group->is_working = MALI_FALSE;
+       *sub_job = group->pp_running_sub_job;
 
        if (!success) {
                MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
@@ -781,21 +1175,51 @@ static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_boo
                mali_group_recovery_reset(group);
        }
 
-       /* Return job to user, schedule and unlock group. */
-       mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
+       return pp_job_to_return;
 }
 
-/* Group must be locked when entering this function.  Will be unlocked before exiting. */
-static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
 {
        struct mali_gp_job *gp_job_to_return;
 
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        MALI_DEBUG_ASSERT_POINTER(group);
        MALI_DEBUG_ASSERT_POINTER(group->gp_core);
        MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
-       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+       /* Stop/clear the timeout timer. */
+       _mali_osk_timer_del_async(group->timeout_timer);
+
+       if (NULL != group->gp_running_job) {
+               mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
 
-       mali_group_post_process_job_gp(group, MALI_FALSE);
+#if defined(CONFIG_MALI400_PROFILING)
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+                                             0, 0);
+
+               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+                       mali_group_report_l2_cache_counters_per_core(group, 0);
+               }
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+               trace_gpu_sched_switch(
+                       mali_gp_core_description(group->gp_core),
+                       sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+               trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */,  0 /* core */,
+                                      mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
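+               /*
+                * Record how far the PLBU heap allocation has come, so
+                * the job's final heap usage is known.
+                */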
+               mali_gp_job_set_current_heap_addr(group->gp_running_job,
+                                                 mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+       }
 
        if (success) {
                /* Only do soft reset for successful jobs, a full recovery
@@ -804,8 +1228,8 @@ static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_boo
        }
 
        gp_job_to_return = group->gp_running_job;
-       group->state = MALI_GROUP_STATE_IDLE;
        group->gp_running_job = NULL;
+       group->is_working = MALI_FALSE;
 
        if (!success) {
                MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
@@ -815,86 +1239,12 @@ static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_boo
                mali_group_recovery_reset(group);
        }
 
-       /* Return job to user, schedule and unlock group. */
-       mali_gp_scheduler_job_done(group, gp_job_to_return, success);
-}
-
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
-{
-       MALI_ASSERT_GROUP_LOCKED(group);
-
-       if (MALI_GROUP_STATE_IDLE == group->state ||
-           mali_gp_job_get_id(group->gp_running_job) != job_id) {
-               return; /* No need to cancel or job has already been aborted or completed */
-       }
-
-       /* Function will unlock the group, so we need to lock it again */
-       mali_group_complete_gp_and_unlock(group, MALI_FALSE);
-       mali_group_lock(group);
-}
-
-static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
-{
-       MALI_ASSERT_GROUP_LOCKED(group);
-
-       if (MALI_GROUP_STATE_IDLE == group->state ||
-           mali_pp_job_get_id(group->pp_running_job) != job_id) {
-               return; /* No need to cancel or job has already been aborted or completed */
-       }
-
-       mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
-       mali_group_lock(group);
-}
-
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
-{
-       struct mali_gp_job *gp_job;
-       struct mali_pp_job *pp_job;
-       u32 gp_job_id = 0;
-       u32 pp_job_id = 0;
-       mali_bool abort_pp = MALI_FALSE;
-       mali_bool abort_gp = MALI_FALSE;
-
-       mali_group_lock(group);
-
-       if (mali_group_is_in_virtual(group)) {
-               /* Group is member of a virtual group, don't touch it! */
-               mali_group_unlock(group);
-               return;
-       }
-
-       gp_job = group->gp_running_job;
-       pp_job = group->pp_running_job;
-
-       if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
-               MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
-
-               gp_job_id = mali_gp_job_get_id(gp_job);
-               abort_gp = MALI_TRUE;
-       }
-
-       if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
-               MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
-
-               pp_job_id = mali_pp_job_get_id(pp_job);
-               abort_pp = MALI_TRUE;
-       }
-
-       if (abort_gp) {
-               mali_group_abort_gp_job(group, gp_job_id);
-       }
-       if (abort_pp) {
-               mali_group_abort_pp_job(group, pp_job_id);
-       }
-
-       mali_group_remove_session_if_unused(group, session);
-
-       mali_group_unlock(group);
+       return gp_job_to_return;
 }
 
 struct mali_group *mali_group_get_glob_group(u32 index)
 {
-       if(mali_global_num_groups > index) {
+       if (mali_global_num_groups > index) {
                return mali_global_groups[index];
        }
 
@@ -908,121 +1258,33 @@ u32 mali_group_get_glob_num_groups(void)
 
 static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
 {
-       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+                            mali_session_get_page_directory(session), session,
+                            mali_group_core_description(group)));
 
-       MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
        if (group->session != session) {
                /* Different session than last time, so we need to do some work */
-               MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
+               MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+                                    session, group->session,
+                                    mali_group_core_description(group)));
                mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
                group->session = session;
        } else {
                /* Same session as last time, so no work required */
-               MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
+               MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+                                    session->page_directory,
+                                    mali_group_core_description(group)));
                mali_mmu_zap_tlb_without_stall(group->mmu);
        }
 }
 
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
-{
-       MALI_ASSERT_GROUP_LOCKED(group);
-
-       if (MALI_GROUP_STATE_IDLE == group->state) {
-               if (group->session == session) {
-                       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
-                       MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
-                       MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
-                       mali_mmu_activate_empty_page_directory(group->mmu);
-                       group->session = NULL;
-               }
-       }
-}
-
-mali_bool mali_group_power_is_on(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-       return group->power_is_on;
-}
-
-void mali_group_power_on_group(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE       == group->state
-                            || MALI_GROUP_STATE_IN_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_DISABLED   == group->state);
-
-       MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
-
-       group->power_is_on = MALI_TRUE;
-}
-
-void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE       == group->state
-                            || MALI_GROUP_STATE_IN_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_DISABLED   == group->state);
-
-       MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
-
-       /* It is necessary to set group->session = NULL so that the powered off MMU is not written
-        * to on map/unmap.  It is also necessary to set group->power_is_on = MALI_FALSE so that
-        * pending bottom_halves does not access powered off cores. */
-
-       group->session = NULL;
-
-       if (do_power_change) {
-               group->power_is_on = MALI_FALSE;
-       }
-}
-
-void mali_group_power_on(void)
-{
-       int i;
-       for (i = 0; i < mali_global_num_groups; i++) {
-               struct mali_group *group = mali_global_groups[i];
-
-               mali_group_lock(group);
-               if (MALI_GROUP_STATE_DISABLED == group->state) {
-                       MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
-               } else {
-                       mali_group_power_on_group(group);
-               }
-               mali_group_unlock(group);
-       }
-       MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
-}
-
-void mali_group_power_off(mali_bool do_power_change)
-{
-       int i;
-
-       for (i = 0; i < mali_global_num_groups; i++) {
-               struct mali_group *group = mali_global_groups[i];
-
-               mali_group_lock(group);
-               if (MALI_GROUP_STATE_DISABLED == group->state) {
-                       MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
-               } else {
-                       mali_group_power_off_group(group, do_power_change);
-               }
-               mali_group_unlock(group);
-       }
-       MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
-}
-
 static void mali_group_recovery_reset(struct mali_group *group)
 {
        _mali_osk_errcode_t err;
 
-       MALI_ASSERT_GROUP_LOCKED(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
        /* Stop cores, bus stop */
        if (NULL != group->pp_core) {
@@ -1070,665 +1332,410 @@ static void mali_group_recovery_reset(struct mali_group *group)
 u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
 {
        int n = 0;
+       int i;
+       struct mali_group *child;
+       struct mali_group *temp;
 
-       n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
-       n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
-       if (group->gp_core) {
-               n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
-               n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
-       }
-       if (group->pp_core) {
-               n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
-               n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
-                                       group->pp_running_job, group->pp_running_sub_job);
-       }
-
-       return n;
-}
-#endif
-
-/* Group must be locked when entering this function.  Will be unlocked before exiting. */
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_ASSERT_GROUP_LOCKED(group);
-
-       if (NULL != group->pp_core) {
-               struct mali_pp_job *pp_job_to_return;
-               u32 pp_sub_job_to_return;
-
-               MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
-
-               mali_group_post_process_job_pp(group);
-
-               pp_job_to_return = group->pp_running_job;
-               pp_sub_job_to_return = group->pp_running_sub_job;
-               group->state = MALI_GROUP_STATE_IDLE;
-               group->pp_running_job = NULL;
-
-               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
-
-               /* Will unlock group. */
-               mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
+       if (mali_group_is_virtual(group)) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Virtual PP Group: %p\n", group);
+       } else if (mali_group_is_in_virtual(group)) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Child PP Group: %p\n", group);
+       } else if (NULL != group->pp_core) {
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "Physical PP Group: %p\n", group);
        } else {
-               struct mali_gp_job *gp_job_to_return;
-
-               MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
-
-               mali_group_post_process_job_gp(group, MALI_FALSE);
-
-               gp_job_to_return = group->gp_running_job;
-               group->state = MALI_GROUP_STATE_IDLE;
-               group->gp_running_job = NULL;
-
-               mali_group_recovery_reset(group); /* This will also clear the page fault itself */
-
-               /* Will unlock group. */
-               mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+               MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "GP Group: %p\n", group);
        }
-}
 
-_mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
-{
-       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
-       struct mali_group *group = (struct mali_group *)data;
-       struct mali_mmu_core *mmu = group->mmu;
-       u32 int_stat;
-
-       MALI_DEBUG_ASSERT_POINTER(mmu);
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
-               goto out;
-       }
-#endif
-
-       /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
-       int_stat = mali_mmu_get_int_status(mmu);
-       if (0 != int_stat) {
-               struct mali_group *parent = group->parent_group;
-
-               /* page fault or bus error, we thread them both in the same way */
-               mali_mmu_mask_all_interrupts(mmu);
-               if (NULL == parent) {
-                       _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
-               } else {
-                       _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
-               }
-               err = _MALI_OSK_ERR_OK;
-               goto out;
-       }
-
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       mali_pm_domain_unlock_state(group->pm_domain);
-#endif
-
-       return err;
-}
-
-static void mali_group_bottom_half_mmu(void * data)
-{
-       struct mali_group *group = (struct mali_group *)data;
-       struct mali_mmu_core *mmu = group->mmu;
-       u32 rawstat;
-       MALI_DEBUG_CODE(u32 status);
-
-       MALI_DEBUG_ASSERT_POINTER(mmu);
-
-       mali_group_lock(group);
-
-       MALI_DEBUG_ASSERT(NULL == group->parent_group);
-
-       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
-               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
-               mali_group_unlock(group);
-               return;
-       }
-
-       rawstat = mali_mmu_get_rawstat(mmu);
-       MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
-
-       MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
-
-       if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
-               /* An actual page fault has occurred. */
-#ifdef DEBUG
-               u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
-               MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
-                                   (void*)fault_address,
-                                   (status >> 6) & 0x1F,
-                                   (status & 32) ? "write" : "read",
-                                   mmu->hw_core.description));
-#endif
-
-               mali_group_mmu_page_fault_and_unlock(group);
-               return;
-       }
-
-       mali_group_unlock(group);
-}
-
-_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
-{
-       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
-       struct mali_group *group = (struct mali_group *)data;
-       struct mali_gp_core *core = group->gp_core;
-       u32 irq_readout;
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
-               goto out;
+       switch (group->state) {
+       case MALI_GROUP_STATE_INACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: INACTIVE\n");
+               break;
+       case MALI_GROUP_STATE_ACTIVATION_PENDING:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: ACTIVATION_PENDING\n");
+               break;
+       case MALI_GROUP_STATE_ACTIVE:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: ACTIVE\n");
+               break;
+       default:
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tstate: UNKNOWN (%d)\n", group->state);
+               MALI_DEBUG_ASSERT(0);
+               break;
        }
-#endif
 
-       irq_readout = mali_gp_get_int_stat(core);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tSW power: %s\n",
+                               group->power_is_on ? "On" : "Off");
 
-       if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
-               /* Mask out all IRQs from this core until IRQ is handled */
-               mali_gp_mask_all_interrupts(core);
+       n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
 
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+       for (i = 0; i < 2; i++) {
+               if (NULL != group->l2_cache_core[i]) {
+                       struct mali_pm_domain *domain;
+                       domain = mali_l2_cache_get_pm_domain(
+                                        group->l2_cache_core[i]);
+                       n += mali_pm_dump_state_domain(domain,
+                                                      buf + n, size - n);
+               }
+       }
 
-               /* We do need to handle this in a bottom half */
-               _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+       if (group->gp_core) {
+               n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tGP running job: %p\n", group->gp_running_job);
+       }
 
-               err = _MALI_OSK_ERR_OK;
-               goto out;
+       if (group->pp_core) {
+               n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+               n += _mali_osk_snprintf(buf + n, size - n,
+                                       "\tPP running job: %p, subjob %d\n",
+                                       group->pp_running_job,
+                                       group->pp_running_sub_job);
        }
 
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       mali_pm_domain_unlock_state(group->pm_domain);
-#endif
+       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+                                   struct mali_group, group_list) {
+               n += mali_group_dump_state(child, buf + n, size - n);
+       }
 
-       return err;
+       return n;
 }
+#endif
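
The new mali_group_dump_state() above grows its report with the n += _mali_osk_snprintf(buf + n, size - n, ...) idiom: each call appends at the current offset and shrinks the remaining space. Below is a standalone sketch of the same pattern in plain C, assuming _mali_osk_snprintf() behaves like snprintf(); dump_append() and the sample strings are invented for illustration, and a clamp is added because snprintf() reports the untruncated length.

#include <stdio.h>
#include <stddef.h>

/* Append msg at offset n of buf (total capacity size); return new offset. */
static size_t dump_append(char *buf, size_t n, size_t size, const char *msg)
{
        int written;

        if (n >= size)
                return n; /* buffer already full */

        written = snprintf(buf + n, size - n, "%s", msg);
        if (written < 0)
                return n;

        /* snprintf() returns the untruncated length; clamp to what fit. */
        if ((size_t)written > size - n - 1)
                written = (int)(size - n - 1);

        return n + (size_t)written;
}

int main(void)
{
        char buf[64];
        size_t n = 0;

        n = dump_append(buf, n, sizeof(buf), "Physical PP Group: (nil)\n");
        n = dump_append(buf, n, sizeof(buf), "\tstate: ACTIVE\n");
        printf("%s(total: %zu bytes)\n", buf, n);
        return 0;
}
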
 
-static void mali_group_bottom_half_gp(void *data)
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
 {
        struct mali_group *group = (struct mali_group *)data;
-       u32 irq_readout;
-       u32 irq_errors;
-
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
+       _mali_osk_errcode_t ret;
 
-       mali_group_lock(group);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
-               MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
-               mali_group_unlock(group);
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
+#endif
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_unlock();
+#endif
+#endif
 
-       irq_readout = mali_gp_read_rawstat(group->gp_core);
-
-       MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
-
-       if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
-               u32 core_status = mali_gp_read_core_status(group->gp_core);
-               if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
-                       MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
-                       group->core_timed_out = MALI_FALSE;
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                                     0, _mali_osk_get_tid(), 0, 0, 0);
+       ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
 
-                       mali_group_complete_gp_and_unlock(group, MALI_TRUE);
-                       return;
-               }
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
+#endif
 
-       /*
-        * Now let's look at the possible error cases (IRQ indicating error or timeout).
-        * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
-        */
-       irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
-       if (0 != irq_errors) {
-               MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
-               group->core_timed_out = MALI_FALSE;
+       if (NULL != group->gp_core) {
                _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                             0, _mali_osk_get_tid(), 0, 0, 0);
-
-               mali_group_complete_gp_and_unlock(group, MALI_FALSE);
-               return;
-       } else if (group->core_timed_out) { /* SW timeout */
-               group->core_timed_out = MALI_FALSE;
-               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
-                       MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
-
-                       mali_group_complete_gp_and_unlock(group, MALI_FALSE);
-                       return;
-               }
-       } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
-               /* GP wants more memory in order to continue. */
-               MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
-
-               group->state = MALI_GROUP_STATE_OOM;
-               mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
-               mali_gp_scheduler_oom(group, group->gp_running_job);
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       } else {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                             0, 0, /* No pid and tid for interrupt handler */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
        }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_unlock();
+#endif
+#endif
 
-       /*
-        * The only way to get here is if we received only one of the two needed
-        * END_CMD_LST interrupts. Re-enable all interrupts except the completion
-        * interrupt that has been received, and continue to run.
-        */
-       mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
-       mali_group_unlock(group);
-
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
+       return ret;
 }
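
Both the old and the new MMU handlers keep the classic Linux split: the upper half runs in interrupt context (hence the "No pid and tid" placeholders in the profiling events), checks whether its device actually raised the interrupt, and defers the real work to process context. A generic sketch of that shape follows; my_dev, my_read_status() and my_mask_irqs() are hypothetical stand-ins, not driver API.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/types.h>

struct my_dev {
        u32 fake_int_stat; /* stand-in for reading a real INT_STAT register */
        struct work_struct bottom_half;
};

static u32 my_read_status(struct my_dev *dev)
{
        return dev->fake_int_stat; /* real code would use readl() on INT_STAT */
}

static void my_mask_irqs(struct my_dev *dev)
{
        (void)dev; /* real code would write the core's INT_MASK register */
}

static void my_bottom_half(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, bottom_half);

        /* Process context: may sleep, take the executor/group locks,
         * complete the job, then unmask the core's interrupts. */
        (void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_dev *dev = data;

        if (0 == my_read_status(dev))
                return IRQ_NONE; /* shared line: interrupt was not ours */

        my_mask_irqs(dev);                /* keep the core quiet until handled */
        schedule_work(&dev->bottom_half); /* defer to process context */
        return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *dev)
{
        INIT_WORK(&dev->bottom_half, my_bottom_half);
        /* then: request_irq(irq, my_irq_handler, IRQF_SHARED, "my_dev", dev) */
}
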
 
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
+static void mali_group_bottom_half_mmu(void *data)
 {
-       /* Stop the timeout timer. */
-       _mali_osk_timer_del_async(group->timeout_timer);
+       struct mali_group *group = (struct mali_group *)data;
 
-       if (NULL == group->gp_running_job) {
-               /* Nothing to do */
-               return;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
        }
 
-       mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
+       mali_executor_interrupt_mmu(group, MALI_FALSE);
 
-#if defined(CONFIG_MALI400_PROFILING)
-       if (suspend) {
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
-                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
-                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
-                                             0, 0);
+       if (NULL != group->gp_core) {
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
        } else {
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
-                                             mali_gp_job_get_perf_counter_value0(group->gp_running_job),
-                                             mali_gp_job_get_perf_counter_value1(group->gp_running_job),
-                                             mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
-                                             0, 0);
-
-               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
-                       mali_group_report_l2_cache_counters_per_core(group, 0);
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                             0, _mali_osk_get_tid(), /* pid and tid */
+                                             MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+                                                     mali_pp_core_get_id(group->pp_core)),
+                                             mali_mmu_get_rawstat(group->mmu), 0);
        }
-#endif
-
-       mali_gp_job_set_current_heap_addr(group->gp_running_job,
-                                         mali_gp_read_plbu_alloc_start_addr(group->gp_core));
 }
 
-_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
 {
-       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
        struct mali_group *group = (struct mali_group *)data;
-       struct mali_pp_core *core = group->pp_core;
-       u32 irq_readout;
+       _mali_osk_errcode_t ret;
 
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
-               goto out;
-       }
-#endif
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-       /*
-        * For Mali-450 there is one particular case we need to watch out for:
-        *
-        * Criteria 1) this function call can be due to a shared interrupt,
-        * and not necessarily because this core signaled an interrupt.
-        * Criteria 2) this core is a part of a virtual group, and thus it should
-        * not do any post processing.
-        * Criteria 3) this core has actually indicated that it has completed by
-        * having set raw_stat/int_stat registers to != 0
-        *
-        * If all these criteria are met, then we could incorrectly start post
-        * processing on the wrong group object (this should only happen on the
-        * parent group)
-        */
-#if !defined(MALI_UPPER_HALF_SCHEDULING)
-       if (mali_group_is_in_virtual(group)) {
-               /*
-                * This check is done without the group lock held, which could lead to
-                * a potential race. This is however ok, since we will safely re-check
-                * this with the group lock held at a later stage. This is just an
-                * early out which will strongly benefit shared IRQ systems.
-                */
-               err = _MALI_OSK_ERR_OK;
-               goto out;
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
 #endif
-
-       irq_readout = mali_pp_get_int_stat(core);
-       if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
-               /* Mask out all IRQs from this core until IRQ is handled */
-               mali_pp_mask_all_interrupts(core);
-
-#if defined(CONFIG_MALI400_PROFILING)
-               /* Currently no support for this interrupt event for the virtual PP core */
-               if (!mali_group_is_virtual(group)) {
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
-                                                     MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
-                                                     irq_readout, 0, 0, 0, 0);
-               }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     mali_gp_get_rawstat(group->gp_core), 0);
+
+       MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                            mali_gp_get_rawstat(group->gp_core),
+                            mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_unlock();
 #endif
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-               /* Check if job is complete without errors */
-               if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
-                       MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
-
-                       mali_group_lock(group);
-
-                       /* Check if job is complete without errors, again, after taking the group lock */
-                       irq_readout = mali_pp_read_rawstat(core);
-                       if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
-                               mali_pp_enable_interrupts(core);
-                               mali_group_unlock(group);
-                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                                                             0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-                               err = _MALI_OSK_ERR_OK;
-                               goto out;
-                       }
-
-                       if (mali_group_is_virtual(group)) {
-                               u32 status_readout = mali_pp_read_status(group->pp_core);
-                               if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
-                                       MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
-                                       mali_pp_enable_interrupts(core);
-                                       mali_group_unlock(group);
-                                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-                                       err = _MALI_OSK_ERR_OK;
-                                       goto out;
-                               }
-                       }
-
-                       if (mali_group_is_in_virtual(group)) {
-                               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
-                               mali_pp_enable_interrupts(core);
-                               mali_group_unlock(group);
-                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                                                             0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-                               err =  _MALI_OSK_ERR_FAULT;
-                               goto out;
-                       }
-
-                       group->core_timed_out = MALI_FALSE;
-
-                       mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
-
-                       /* No need to enable interrupts again, since the core will be reset while completing the job */
-
-                       MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
-
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
-                                                     0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
-                       err = _MALI_OSK_ERR_OK;
-                       goto out;
-               }
 #endif
+       ret = mali_executor_interrupt_gp(group, MALI_TRUE);
 
-               /* We do need to handle this in a bottom half */
-               _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
-               err = _MALI_OSK_ERR_OK;
-               goto out;
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
-
-out:
+#endif
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     mali_gp_get_rawstat(group->gp_core), 0);
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
-       mali_pm_domain_unlock_state(group->pm_domain);
+       mali_executor_unlock();
 #endif
-
-       return err;
+#endif
+       return ret;
 }
 
-static void mali_group_bottom_half_pp(void *data)
+static void mali_group_bottom_half_gp(void *data)
 {
        struct mali_group *group = (struct mali_group *)data;
-       struct mali_pp_core *core = group->pp_core;
-       u32 irq_readout;
-       u32 irq_errors;
 
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
-                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                     0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
-       mali_group_lock(group);
-
-       if (mali_group_is_in_virtual(group)) {
-               /* We're member of a virtual group, so interrupt should be handled by the virtual group */
-               mali_pp_enable_interrupts(core);
-               mali_group_unlock(group);
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                             0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
-       }
-
-       if ( MALI_FALSE == mali_group_power_is_on(group) ) {
-               MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
-               mali_group_unlock(group);
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                             0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
-       }
-
-       irq_readout = mali_pp_read_rawstat(group->pp_core);
-
-       MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-       /* Check if job is complete without errors */
-       if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
-               if (mali_group_is_virtual(group)) {
-                       u32 status_readout = mali_pp_read_status(group->pp_core);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     mali_gp_get_rawstat(group->gp_core), 0);
 
-                       if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
-                               MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
-                               mali_pp_enable_interrupts(core);
-                               mali_group_unlock(group);
+       mali_executor_interrupt_gp(group, MALI_FALSE);
 
-                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                                             0, _mali_osk_get_tid(), 0, 0, 0);
-                               return;
-                       }
-               }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+                                     mali_gp_get_rawstat(group->gp_core), 0);
+}
 
-               if (!group->core_timed_out) {
-                       MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
-                       group->core_timed_out = MALI_FALSE;
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+       struct mali_group *group = (struct mali_group *)data;
+       _mali_osk_errcode_t ret;
 
-                       mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                                     0, _mali_osk_get_tid(), 0, 0, 0);
-                       return;
-               }
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
+#endif
 
-       /*
-        * Now let's look at the possible error cases (IRQ indicating error or timeout).
-        * END_OF_FRAME and HANG interrupts are not considered errors.
-        */
-       irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
-       if (0 != irq_errors) {
-               MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
-                                 irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
-               group->core_timed_out = MALI_FALSE;
-
-               mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
-
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                             0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
-       } else if (group->core_timed_out) { /* SW timeout */
-               group->core_timed_out = MALI_FALSE;
-               if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
-                       MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
-                                   mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
-
-                       mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
-               } else {
-                       mali_group_unlock(group);
-               }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     mali_pp_get_rawstat(group->pp_core), 0);
+
+       MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+                            mali_pp_get_rawstat(group->pp_core),
+                            mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_unlock();
+#endif
+#endif
 
-               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                             0, _mali_osk_get_tid(), 0, 0, 0);
-               return;
-       }
+       ret = mali_executor_interrupt_pp(group, MALI_TRUE);
 
-       /*
-        * We should never get here; re-enable interrupts and continue
-        */
-       if (0 == irq_readout) {
-               MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
-                                    mali_pp_get_hw_core_desc(group->pp_core)));
-       } else {
-               MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
-                                 mali_pp_get_hw_core_desc(group->pp_core)));
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_lock();
+       if (!mali_group_is_working(group)) {
+               /* Not working, so nothing to do */
+               mali_executor_unlock();
+               return _MALI_OSK_ERR_FAULT;
        }
-       mali_pp_enable_interrupts(core);
-       mali_group_unlock(group);
-
+#endif
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
-                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
-                                     0, _mali_osk_get_tid(), 0, 0, 0);
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+                                     0, 0, /* No pid and tid for interrupt handler */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     mali_pp_get_rawstat(group->pp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+       mali_executor_unlock();
+#endif
+#endif
+       return ret;
 }
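
The removed bottom halves classified interrupts by masking off every bit that can legitimately occur (completion, hang, PLBU out-of-memory) and treating whatever remains as an error that aborts the job. A minimal illustration of that filter follows; the bit values are invented, not the real MALI200/MALIGP2 register definitions.

#include <stdint.h>

/* Invented bit values; the real ones are the MALI200_REG_VAL_IRQ_* /
 * MALIGP2_REG_VAL_IRQ_* definitions used above. */
#define IRQ_END_OF_FRAME (1u << 0) /* normal completion */
#define IRQ_HANG         (1u << 1) /* expected; covered by the SW timeout */

static int irq_is_error(uint32_t irq_readout)
{
        /* Clear every expected bit; anything left is an unexpected error. */
        uint32_t irq_errors = irq_readout & ~(IRQ_END_OF_FRAME | IRQ_HANG);

        return 0 != irq_errors;
}
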
 
-static void mali_group_post_process_job_pp(struct mali_group *group)
+static void mali_group_bottom_half_pp(void *data)
 {
-       MALI_ASSERT_GROUP_LOCKED(group);
-
-       /* Stop the timeout timer. */
-       _mali_osk_timer_del_async(group->timeout_timer);
-
-       if (NULL != group->pp_running_job) {
-               if (MALI_TRUE == mali_group_is_virtual(group)) {
-                       struct mali_group *child;
-                       struct mali_group *temp;
-
-                       /* update performance counters from each physical pp core within this virtual group */
-                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                               mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
-                       }
+       struct mali_group *group = (struct mali_group *)data;
 
-#if defined(CONFIG_MALI400_PROFILING)
-                       /* send profiling data per physical core */
-                       _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
-                               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
-                                                             MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core))|
-                                                             MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
-                                                             mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
-                                                             mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
-                                                             mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
-                                                             0, 0);
-                       }
-                       if (0 != group->l2_cache_core_ref_count[0]) {
-                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-                               }
-                       }
-                       if (0 != group->l2_cache_core_ref_count[1]) {
-                               if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
-                                   (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
-                                       mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
-                               }
-                       }
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
 
-#endif
-               } else {
-                       /* update performance counters for a physical group's pp core */
-                       mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     mali_pp_get_rawstat(group->pp_core), 0);
 
-#if defined(CONFIG_MALI400_PROFILING)
-                       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|
-                                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core))|
-                                                     MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
-                                                     mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
-                                                     mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
-                                                     mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
-                                                     0, 0);
+       mali_executor_interrupt_pp(group, MALI_FALSE);
 
-                       if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
-                           (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
-                               mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
-                       }
-#endif
-               }
-       }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+                                     MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                     MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+                                     0, _mali_osk_get_tid(), /* pid and tid */
+                                     MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+                                             mali_pp_core_get_id(group->pp_core)),
+                                     mali_pp_get_rawstat(group->pp_core), 0);
 }
 
 static void mali_group_timeout(void *data)
 {
        struct mali_group *group = (struct mali_group *)data;
+       MALI_DEBUG_ASSERT_POINTER(group);
 
-       group->core_timed_out = MALI_TRUE;
+       MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+                            mali_group_core_description(group),
+                            _mali_osk_time_tickcount()));
 
        if (NULL != group->gp_core) {
-               MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
-               _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+               mali_group_schedule_bottom_half_gp(group);
        } else {
-               MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
-               _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+               MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+               mali_group_schedule_bottom_half_pp(group);
        }
 }
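
mali_group_timeout() is the expiry callback of a per-group watchdog: the timer is armed when a job starts, deleted when it completes, and expiry means the job overran its budget (mali_max_job_runtime, defaulting to MALI_MAX_JOB_RUNTIME_DEFAULT). A rough sketch of that lifecycle, written against the modern stock Linux timer API (v4.15+) rather than the _mali_osk wrappers this tree uses; my_group and its helpers are hypothetical.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_group {
        struct timer_list timeout_timer;
};

static void my_group_timeout(struct timer_list *t)
{
        struct my_group *grp = from_timer(grp, t, timeout_timer);

        /* The running job overran its budget; as above, defer the abort
         * to the GP/PP bottom half instead of acting in timer context. */
        (void)grp;
}

static void my_group_init(struct my_group *grp)
{
        timer_setup(&grp->timeout_timer, my_group_timeout, 0);
}

static void my_group_job_started(struct my_group *grp, unsigned int budget_ms)
{
        mod_timer(&grp->timeout_timer, jiffies + msecs_to_jiffies(budget_ms));
}

static void my_group_job_done(struct my_group *grp)
{
        del_timer_sync(&grp->timeout_timer); /* finished within the budget */
}
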
 
-void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+mali_bool mali_group_zap_session(struct mali_group *group,
+                                struct mali_session_data *session)
 {
        MALI_DEBUG_ASSERT_POINTER(group);
        MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-       /* Early out - safe even if mutex is not held */
-       if (group->session != session) return;
-
-       mali_group_lock(group);
-
-       mali_group_remove_session_if_unused(group, session);
+       if (group->session != session) {
+               /* not running from this session */
+               return MALI_TRUE; /* success */
+       }
 
-       if (group->session == session) {
+       if (group->is_working) {
                /* The Zap also does the stall and disable_stall */
                mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
-               if (MALI_TRUE != zap_success) {
-                       MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
-
-                       mali_group_mmu_page_fault_and_unlock(group);
-                       return;
-               }
+               return zap_success;
+       } else {
+               /* Just remove the session instead of zapping */
+               mali_group_clear_session(group);
+               return MALI_TRUE; /* success */
        }
-
-       mali_group_unlock(group);
 }
 
 #if defined(CONFIG_MALI400_PROFILING)
@@ -1740,26 +1747,28 @@ static void mali_group_report_l2_cache_counters_per_core(struct mali_group *grou
        u32 value1 = 0;
        u32 profiling_channel = 0;
 
-       switch(core_num) {
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       switch (core_num) {
        case 0:
                profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
-                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
                break;
        case 1:
                profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
-                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
                break;
        case 2:
                profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
-                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
                break;
        default:
                profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
-                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
-                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+                                   MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                   MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
                break;
        }
 
@@ -1784,72 +1793,3 @@ static void mali_group_report_l2_cache_counters_per_core(struct mali_group *grou
        _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
 }
 #endif /* #if defined(CONFIG_MALI400_PROFILING) */
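
The final event above packs both L2 counter-source IDs into a single word, source1 << 8 | source0, leaving a trace consumer to split them again. A tiny sketch of that packing and its inverse (function names invented):

#include <stdint.h>

static uint32_t pack_counter_sources(uint8_t src0, uint8_t src1)
{
        /* src1 lands in bits 15..8, src0 in bits 7..0, as in the event above */
        return ((uint32_t)src1 << 8) | src0;
}

static void unpack_counter_sources(uint32_t packed, uint8_t *src0, uint8_t *src1)
{
        *src0 = (uint8_t)(packed & 0xffu);
        *src1 = (uint8_t)((packed >> 8) & 0xffu);
}
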
-
-mali_bool mali_group_is_enabled(struct mali_group *group)
-{
-       mali_bool enabled = MALI_TRUE;
-
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       mali_group_lock(group);
-       if (MALI_GROUP_STATE_DISABLED == group->state) {
-               enabled = MALI_FALSE;
-       }
-       mali_group_unlock(group);
-
-       return enabled;
-}
-
-void mali_group_enable(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT(   NULL != mali_group_get_pp_core(group)
-                            || NULL != mali_group_get_gp_core(group));
-
-       if (NULL != mali_group_get_pp_core(group)) {
-               mali_pp_scheduler_enable_group(group);
-       } else {
-               mali_gp_scheduler_enable_group(group);
-       }
-}
-
-void mali_group_disable(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-       MALI_DEBUG_ASSERT(   NULL != mali_group_get_pp_core(group)
-                            || NULL != mali_group_get_gp_core(group));
-
-       if (NULL != mali_group_get_pp_core(group)) {
-               mali_pp_scheduler_disable_group(group);
-       } else {
-               mali_gp_scheduler_disable_group(group);
-       }
-}
-
-static struct mali_pm_domain* mali_group_get_l2_domain(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
-
-       /* l2_cache_core[0] stores the related l2 domain */
-       return group->l2_cache_core[0]->pm_domain;
-}
-
-void mali_group_get_pm_domain_ref(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       /* Get group used l2 domain ref */
-       mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
-       /* Get group used core domain ref */
-       mali_pm_domain_ref_get(group->pm_domain);
-}
-
-void mali_group_put_pm_domain_ref(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       /* Put group used core domain ref */
-       mali_pm_domain_ref_put(group->pm_domain);
-       /* Put group used l2 domain ref */
-       mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
-}
index ba86796711b620a49b198cb796ab61172b1ab752..d8f8ade891134c2047f1fd564f4af8373de78e60 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #ifndef __MALI_GROUP_H__
 #define __MALI_GROUP_H__
 
-#include "linux/jiffies.h"
 #include "mali_osk.h"
 #include "mali_l2_cache.h"
 #include "mali_mmu.h"
 #include "mali_gp.h"
 #include "mali_pp.h"
 #include "mali_session.h"
+#include "mali_osk_profiling.h"
 
 /**
  * @brief Default max runtime [ms] for a core job - used by timeout timers
  */
-#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
 
-/** @brief A mali group object represents a MMU and a PP and/or a GP core.
- *
- */
 #define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
 
-enum mali_group_core_state {
-       MALI_GROUP_STATE_IDLE,
-       MALI_GROUP_STATE_WORKING,
-       MALI_GROUP_STATE_OOM,
-       MALI_GROUP_STATE_IN_VIRTUAL,
-       MALI_GROUP_STATE_JOINING_VIRTUAL,
-       MALI_GROUP_STATE_LEAVING_VIRTUAL,
-       MALI_GROUP_STATE_DISABLED,
+enum mali_group_state {
+       MALI_GROUP_STATE_INACTIVE,
+       MALI_GROUP_STATE_ACTIVATION_PENDING,
+       MALI_GROUP_STATE_ACTIVE,
 };
 
-/* Forward declaration from mali_pm_domain.h */
-struct mali_pm_domain;
-
 /**
  * The structure represents a render group
  * A render group is defined by all the cores that share the same Mali MMU
@@ -51,8 +44,11 @@ struct mali_group {
        struct mali_mmu_core        *mmu;
        struct mali_session_data    *session;
 
-       mali_bool                   power_is_on;
-       enum mali_group_core_state  state;
+       enum mali_group_state        state;
+       mali_bool                    power_is_on;
+
+       mali_bool                    is_working;
+       unsigned long                start_time; /* in ticks */
 
        struct mali_gp_core         *gp_core;
        struct mali_gp_job          *gp_running_job;
@@ -61,86 +57,93 @@ struct mali_group {
        struct mali_pp_job          *pp_running_job;
        u32                         pp_running_sub_job;
 
+       struct mali_pm_domain       *pm_domain;
+
        struct mali_l2_cache_core   *l2_cache_core[2];
        u32                         l2_cache_core_ref_count[2];
 
+       /* Parent virtual group (if any) */
+       struct mali_group           *parent_group;
+
        struct mali_dlbu_core       *dlbu_core;
        struct mali_bcast_unit      *bcast_core;
 
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_t        *lock;
-#else
-       _mali_osk_spinlock_t            *lock;
-#endif
-
-       _mali_osk_list_t            pp_scheduler_list;
+       /* Used for working groups which need to be disabled */
+       mali_bool                    disable_requested;
 
-       /* List used for virtual groups. For a virtual group, the list represents the
-        * head element. */
+       /* Used by group to link child groups (for virtual group) */
        _mali_osk_list_t            group_list;
 
-       struct mali_group           *pm_domain_list;
-       struct mali_pm_domain       *pm_domain;
+       /* Used by the executor module to link groups in the same state */
+       _mali_osk_list_t            executor_list;
 
-       /* Parent virtual group (if any) */
-       struct mali_group           *parent_group;
+       /* Used by PM domains to link groups in the same domain */
+       _mali_osk_list_t             pm_domain_list;
 
        _mali_osk_wq_work_t         *bottom_half_work_mmu;
        _mali_osk_wq_work_t         *bottom_half_work_gp;
        _mali_osk_wq_work_t         *bottom_half_work_pp;
 
        _mali_osk_timer_t           *timeout_timer;
-       mali_bool                   core_timed_out;
 };
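
The reworked struct carries three independent _mali_osk_list_t nodes (group_list, executor_list, pm_domain_list) because intrusive list membership is per-node: the same group can sit on a virtual group's child list, one of the executor's state lists and a PM domain's list simultaneously, each owner iterating through its own node. A sketch using the plain <linux/list.h> equivalent; my_group and count_children() are invented for illustration.

#include <linux/list.h>

struct my_group {
        struct list_head group_list;    /* child link inside a virtual group */
        struct list_head executor_list; /* link in an executor state list */
};

static int count_children(struct list_head *children)
{
        struct my_group *child;
        int n = 0;

        /* Same walk as _MALI_OSK_LIST_FOREACHENTRY() over group->group_list. */
        list_for_each_entry(child, children, group_list)
                n++;

        return n;
}
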
 
 /** @brief Create a new Mali group object
  *
- * @param cluster Pointer to the cluster to which the group is connected.
- * @param mmu Pointer to the MMU that defines this group
  * @return A pointer to a new group object
  */
 struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
-                                     struct mali_dlbu_core *dlbu,
-                                     struct mali_bcast_unit *bcast);
+                                    struct mali_dlbu_core *dlbu,
+                                    struct mali_bcast_unit *bcast,
+                                    u32 domain_index);
+
+void mali_group_dump_status(struct mali_group *group);
 
-_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core* mmu_core);
+void mali_group_delete(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+               struct mali_mmu_core *mmu_core);
 void mali_group_remove_mmu_core(struct mali_group *group);
 
-_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+               struct mali_gp_core *gp_core);
 void mali_group_remove_gp_core(struct mali_group *group);
 
-_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+               struct mali_pp_core *pp_core);
 void mali_group_remove_pp_core(struct mali_group *group);
 
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
-
-void mali_group_delete(struct mali_group *group);
-
-/** @brief Virtual groups */
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
-void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
-struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+MALI_STATIC_INLINE const char *mali_group_core_description(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       if (NULL != group->pp_core) {
+               return mali_pp_core_description(group->pp_core);
+       } else {
+               MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+               return mali_gp_core_description(group->gp_core);
+       }
+}
 
 MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
 {
-#if defined(CONFIG_MALI450)
+       MALI_DEBUG_ASSERT_POINTER(group);
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
        return (NULL != group->dlbu_core);
 #else
        return MALI_FALSE;
 #endif
 }
 
-/** @brief Check if a group is considered as part of a virtual group
- *
- * @note A group is considered to be "part of" a virtual group also during the transition
- *       in to / out of the virtual group.
+/** @brief Check whether a group is part of a virtual group
  */
 MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
 {
-#if defined(CONFIG_MALI450)
-       return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
-               MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
-               MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
 #else
        return MALI_FALSE;
 #endif
@@ -148,162 +151,309 @@ MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
 
 /** @brief Reset group
  *
- * This function will reset the entire group, including all the cores present in the group.
+ * This function will reset the entire group,
+ * including all the cores present in the group.
  *
  * @param group Pointer to the group to reset
  */
 void mali_group_reset(struct mali_group *group);
 
-/** @brief Zap MMU TLB on all groups
- *
- * Zap TLB on group if \a session is active.
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+       if (NULL != group->session) {
+               mali_mmu_activate_empty_page_directory(group->mmu);
+               group->session = NULL;
+       }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children must be ACTIVE first
  */
-void mali_group_zap_session(struct mali_group* group, struct mali_session_data *session);
+mali_bool mali_group_set_active(struct mali_group *group);
 
-/** @brief Get pointer to GP core object
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and the caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
  */
-struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+mali_bool mali_group_deactivate(struct mali_group *group);
 
-/** @brief Get pointer to PP core object
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+       struct mali_group *group, mali_bool disable)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       group->disable_requested = disable;
+
+       /**
+        * When any child group's disable_request is set to TRUE, the
+        * parent group's disable_request must also be set to TRUE.
+        * Conversely, the parent group's disable_request may only be set
+        * to FALSE once every child group's disable_request is FALSE.
+        */
+       if (NULL != group->parent_group && MALI_TRUE == disable) {
+               group->parent_group->disable_requested = disable;
+       }
+}
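
A minimal usage sketch of the propagation rule above (a hypothetical caller, not part of this commit; it assumes the executor lock is held, as the asserts require):

/* Hypothetical sketch: disabling a child that sits inside a virtual
 * group also flags the parent, per mali_group_set_disable_request().
 * Assumes the executor lock is held, as the asserts above require. */
static void example_disable_child(struct mali_group *child)
{
	mali_group_set_disable_request(child, MALI_TRUE);

	if (mali_group_is_in_virtual(child)) {
		/* The parent virtual group is now disable-requested too */
		MALI_DEBUG_ASSERT(MALI_TRUE ==
				  mali_group_disable_requested(child->parent_group));
	}
}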
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+       struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
  */
-struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       if (mali_group_is_in_virtual(group)) {
+               struct mali_group *tmp_group = mali_executor_get_virtual_group();
+               return tmp_group->is_working;
+       }
+       return group->is_working;
+}
 
-/** @brief Lock group object
- *
- * Most group functions will lock the group object themselves. The exception is
- * the group_bottom_half which requires the group to be locked on entry.
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return group->gp_running_job;
+}
+
+/** @brief Zap MMU TLB on all groups
  *
- * @param group Pointer to group to lock
+ * Zap TLB on group if \a session is active.
  */
-void mali_group_lock(struct mali_group *group);
+mali_bool mali_group_zap_session(struct mali_group *group,
+                                struct mali_session_data *session);
 
-/** @brief Unlock group object
- *
- * @param group Pointer to group to unlock
+/** @brief Get pointer to GP core object
  */
-void mali_group_unlock(struct mali_group *group);
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group);
-#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
-#else
-#define MALI_ASSERT_GROUP_LOCKED(group)
-#endif
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       return group->pp_core;
+}
 
 /** @brief Start GP job
  */
 void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
-/** @brief Start fragment of PP job
- */
+
 void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
 
-/** @brief Resume GP job that suspended waiting for more heap memory
- */
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
-/** @brief Abort GP job
- *
- * Used to abort suspended OOM jobs when user space failed to allocate more memory.
- */
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
-/** @brief Abort all GP jobs from \a session
- *
- * Used on session close when terminating all running and queued jobs from \a session.
- */
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+/** @brief Start a PP job (a range of its sub-jobs) on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
 
-mali_bool mali_group_power_is_on(struct mali_group *group);
-void mali_group_power_on_group(struct mali_group *group);
-void mali_group_power_off_group(struct mali_group *group, mali_bool power_status);
-void mali_group_power_on(void);
 
-/** @brief Prepare group for power off
- *
- * Update the group's state and prepare for the group to be powered off.
- *
- * If do_power_change is MALI_FALSE group session will be set to NULL so that
- * no more activity will happen to this group, but the power state flag will be
- * left unchanged.
- *
- * @do_power_change MALI_TRUE if power status is to be updated
- */
-void mali_group_power_off(mali_bool do_power_change);
+/** @brief Start a particular sub-job of a PP job on a specific physical PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
 
-struct mali_group *mali_group_get_glob_group(u32 index);
-u32 mali_group_get_glob_num_groups(void);
 
-u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+/** @brief Remove all unused groups from the tmp_unused group list, leaving the virtual group in a consistent state.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
 
-/* MMU-related functions */
-_mali_osk_errcode_t mali_group_upper_half_mmu(void * data);
 
-/* GP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
 
-/* PP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_gp_get_interrupt_result(group->gp_core);
+}
 
-/** @brief Check if group is enabled
- *
- * @param group group to check
- * @return MALI_TRUE if enabled, MALI_FALSE if not
- */
-mali_bool mali_group_is_enabled(struct mali_group *group);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_pp_get_interrupt_result(group->pp_core);
+}
 
-/** @brief Enable group
- *
- * An enabled job is put on the idle scheduler list and can be used to handle jobs.  Does nothing if
- * group is already enabled.
- *
- * @param group group to enable
- */
-void mali_group_enable(struct mali_group *group);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_mmu_get_interrupt_result(group->mmu);
+}
 
-/** @brief Disable group
- *
- * A disabled group will no longer be used by the scheduler.  If part of a virtual group, the group
- * will be removed before being disabled.  Cores part of a disabled group is safe to power down.
- *
- * @param group group to disable
- */
-void mali_group_disable(struct mali_group *group);
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_gp_is_active(group->gp_core);
+}
 
-MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
 {
-       mali_bool empty = MALI_FALSE;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_pp_is_active(group->pp_core);
+}
 
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+       unsigned long time_cost;
+       struct mali_group *tmp_group = group;
 
-       if (_mali_osk_list_empty(&group->group_list)) {
-               group->state = MALI_GROUP_STATE_DISABLED;
-               group->session = NULL;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
 
-               empty = MALI_TRUE;
+       /* If the group is part of a virtual group, use the virtual group's start time */
+       if (mali_group_is_in_virtual(group)) {
+               tmp_group = mali_executor_get_virtual_group();
        }
 
-       return empty;
+       time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+       if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+               /*
+                * current tick is at or after timeout end time,
+                * so this is a valid timeout
+                */
+               return MALI_TRUE;
+       } else {
+               /*
+                * Not a valid timeout. A HW interrupt probably beat
+                * us to it, and the timer wasn't properly deleted
+                * (async deletion used due to atomic context).
+                */
+               return MALI_FALSE;
+       }
 }
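
The subtraction above is safe across tick-counter wraparound because it is done in unsigned arithmetic; a standalone sketch (plain C, hypothetical tick values) of the same comparison:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values: the job started 50 ticks before the
	 * counter wrapped, and the current tick is 100 past the wrap. */
	unsigned long start = (unsigned long)-50;
	unsigned long now = 100;
	unsigned long limit = 120; /* illustrative timeout, in ticks */

	unsigned long elapsed = now - start; /* 150, despite the wrap */
	printf("timed out: %s\n", (limit <= elapsed) ? "yes" : "no");
	return 0;
}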
 
-MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
 {
-       mali_bool empty = MALI_FALSE;
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_gp_mask_all_interrupts(group->gp_core);
+}
 
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return mali_pp_mask_all_interrupts(group->pp_core);
+}
 
-       if (_mali_osk_list_empty(&group->group_list)) {
-               MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+       struct mali_group *group,
+       enum mali_interrupt_result exceptions)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
 
-               group->state = MALI_GROUP_STATE_IDLE;
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
 
-               empty = MALI_TRUE;
-       }
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+}
 
-       return empty;
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT_POINTER(group->mmu);
+       _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
+
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+                                     MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+                                     0, 0, 0, 0, 0);
 }
+#endif
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
 
-/* Get group used l2 domain and core domain ref */
-void mali_group_get_pm_domain_ref(struct mali_group *group);
-/* Put group used l2 domain and core domain ref */
-void mali_group_put_pm_domain_ref(struct mali_group *group);
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+       MALI_DEBUG_ASSERT_POINTER(group);
+       MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+       return _mali_osk_list_empty(&group->group_list);
+}
 
 #endif /* __MALI_GROUP_H__ */
index fcde1438e08ae05a917908a1e109a4720fdad72f..0e2af861f2de19c675d2b116e85b99a4923cbe52 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -39,7 +39,9 @@ _mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_o
 
 void mali_hw_core_delete(struct mali_hw_core *core)
 {
-       _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
-       core->mapped_registers = NULL;
+       if (NULL != core->mapped_registers) {
+               _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+               core->mapped_registers = NULL;
+       }
        _mali_osk_mem_unreqregion(core->phys_addr, core->size);
 }
index 66e9b42fe0a4be92c31a22b538f4441fc9471b1f..fc780903f93518a5e38ff9b1583023e91215b38c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * This struct is embedded inside all core specific structs.
  */
 struct mali_hw_core {
-       u32 phys_addr;                    /**< Physical address of the registers */
+       uintptr_t phys_addr;              /**< Physical address of the registers */
        u32 phys_offset;                  /**< Offset from start of Mali to registers */
        u32 size;                         /**< Size of registers */
        mali_io_address mapped_registers; /**< Virtual mapping of the registers */
-       const char* description;          /**< Name of unit (as specified in device configuration) */
+       const char *description;          /**< Name of unit (as specified in device configuration) */
 };
 
-#define MALI_REG_POLL_COUNT_FAST 1000
+#define MALI_REG_POLL_COUNT_FAST 1000000
 #define MALI_REG_POLL_COUNT_SLOW 1000000
 
+/*
+ * GP and PP core translate their int_stat/rawstat into one of these
+ */
+enum mali_interrupt_result {
+       MALI_INTERRUPT_RESULT_NONE,
+       MALI_INTERRUPT_RESULT_SUCCESS,
+       MALI_INTERRUPT_RESULT_SUCCESS_VS,
+       MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+       MALI_INTERRUPT_RESULT_OOM,
+       MALI_INTERRUPT_RESULT_ERROR
+};
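
A hedged sketch of the kind of translation the comment above describes; the EXAMPLE_* bit masks are illustrative placeholders, not the real GP/PP rawstat layout:

/* Illustrative only: made-up status bits, not the Mali register layout */
#define EXAMPLE_IRQ_SUCCESS_BIT 0x01u
#define EXAMPLE_IRQ_OOM_BIT     0x02u
#define EXAMPLE_IRQ_ERROR_BITS  0xF0u

static enum mali_interrupt_result example_translate_rawstat(u32 rawstat)
{
	if (rawstat & EXAMPLE_IRQ_ERROR_BITS)
		return MALI_INTERRUPT_RESULT_ERROR;
	if (rawstat & EXAMPLE_IRQ_OOM_BIT)
		return MALI_INTERRUPT_RESULT_OOM;
	if (rawstat & EXAMPLE_IRQ_SUCCESS_BIT)
		return MALI_INTERRUPT_RESULT_SUCCESS;
	return MALI_INTERRUPT_RESULT_NONE;
}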
+
 _mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
 void mali_hw_core_delete(struct mali_hw_core *core);
 
@@ -37,14 +49,14 @@ MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32
        u32 read_val;
        read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
        MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
-                            core->description, relative_address, read_val));
+                            core->description, relative_address, read_val));
        return read_val;
 }
 
 MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
 {
        MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
-                            core->description, relative_address, new_val));
+                            core->description, relative_address, new_val));
        _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
 }
 
@@ -54,17 +66,16 @@ MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core
 MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
 {
        MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
-                            core->description, relative_address, new_val));
-       if(old_val != new_val) {
+                            core->description, relative_address, new_val));
+       if (old_val != new_val) {
                _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
        }
 }
 
-
 MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
 {
        MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
-                            core->description, relative_address, new_val));
+                            core->description, relative_address, new_val));
        _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
 }
 
@@ -72,27 +83,27 @@ MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw
 {
        u32 i;
        MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
-                            core->description,relative_address, nr_of_regs));
+                            core->description, relative_address, nr_of_regs));
 
        /* Do not use burst writes against the registers */
-       for (i = 0; i< nr_of_regs; i++) {
-               mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+       for (i = 0; i < nr_of_regs; i++) {
+               mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
        }
 }
 
 /* Conditionally write a set of registers.
  * The register will only be written if the new value is different from the old_value.
 * If the new value differs, the register is written; old_array itself is
 * not modified, so the caller must keep its cached copy in sync */
-MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32* old_array)
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
 {
        u32 i;
        MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
-                            core->description,relative_address, nr_of_regs));
+                            core->description, relative_address, nr_of_regs));
 
        /* Do not use burst writes against the registers */
-       for (i = 0; i< nr_of_regs; i++) {
-               if(old_array[i] != write_array[i]) {
-                       mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+       for (i = 0; i < nr_of_regs; i++) {
+               if (old_array[i] != write_array[i]) {
+                       mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
                }
        }
 }
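
A sketch of the shadow-copy pattern this helper is built for (hypothetical caller: it keeps a cached copy of the register block and refreshes it after the conditional write, as the comment above requires):

/* Hypothetical sketch: write only the changed registers, then refresh
 * the caller's shadow copy so the next conditional write stays accurate. */
static void example_flush_regs(struct mali_hw_core *core, u32 base,
			       u32 *new_vals, u32 *shadow, u32 nr_of_regs)
{
	u32 i;

	mali_hw_core_register_write_array_relaxed_conditional(core, base,
			new_vals, nr_of_regs, shadow);

	for (i = 0; i < nr_of_regs; i++) {
		shadow[i] = new_vals[i];
	}
}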
index ca5a34fd305a0e802391ce07768425cd930eb2ad..38b10035d200910de5d0756d9f3357c848bdecf5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -11,7 +11,6 @@
 #ifndef __MALI_KERNEL_COMMON_H__
 #define __MALI_KERNEL_COMMON_H__
 
-#include "mtk_mali_kernel.h" /*Mediatek custom routine for Mali*/
 #include "mali_osk.h"
 
 /* Make sure debug is defined when it should be */
 #define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
 
 /**
- *     Basic error macro. This checks whether the given condition is true, and if not returns
- *     from this function with the supplied error code. This is a macro so that we can override it
- *     for stress testing.
+ *  Basic error macro. This checks whether the given condition is true, and if not returns
+ *  from this function with the supplied error code. This is a macro so that we can override it
+ *  for stress testing.
  *
- *     Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
- *     else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *  Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ *  else clauses. Note also no closing semicolon - this is supplied in typical usage:
  *
- *     MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ *  MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
  */
 #define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
 
 /**
- *     Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
- *     then the value is returned from the enclosing function as an error code. This effectively
- *     acts as a guard clause, and propagates error values up the call stack. This uses a
- *     temporary value to ensure that the error expression is not evaluated twice.
- *  If the counter for forcing a failure has been set using _mali_force_error, this error will be
- *  returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ *  Error propagation macro. If the expression given is anything other than
+ *  _MALI_OSK_NO_ERROR, then the value is returned from the enclosing function
+ *  as an error code. This effectively acts as a guard clause, and propagates
+ *  error values up the call stack. This uses a temporary value to ensure that
+ *  the error expression is not evaluated twice.
+ *  If the counter for forcing a failure has been set using _mali_force_error,
+ *  this error will be returned without evaluating the expression in
+ *  MALI_CHECK_NO_ERROR
  */
 #define MALI_CHECK_NO_ERROR(expression) \
-    do { _mali_osk_errcode_t _check_no_error_result=(expression); \
-         if(_check_no_error_result != _MALI_OSK_ERR_OK) \
-         MALI_ERROR(_check_no_error_result); \
-    } while(0)
+       do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+               if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+                       MALI_ERROR(_check_no_error_result); \
+       } while(0)
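
A usage sketch of the guard-clause behaviour described above (the example_init_stage_*() helpers are hypothetical placeholders):

static _mali_osk_errcode_t example_init_stage_one(void);  /* hypothetical */
static _mali_osk_errcode_t example_init_stage_two(void);  /* hypothetical */

static _mali_osk_errcode_t example_init(void)
{
	/* Each check returns the first non-OK error code to the caller */
	MALI_CHECK_NO_ERROR(example_init_stage_one());
	MALI_CHECK_NO_ERROR(example_init_stage_two());
	MALI_SUCCESS;
}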
 
 /**
  *  Pointer check macro. Checks non-null pointer.
 #define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
 
 /**
- *     Error macro with goto. This checks whether the given condition is true, and if not jumps
- *     to the specified label using a goto. The label must therefore be local to the function in
- *     which this macro appears. This is most usually used to execute some clean-up code before
- *     exiting with a call to ERROR.
+ *  Error macro with goto. This checks whether the given condition is true, and if not jumps
+ *  to the specified label using a goto. The label must therefore be local to the function in
+ *  which this macro appears. This is most usually used to execute some clean-up code before
+ *  exiting with a call to ERROR.
  *
- *     Like the other macros, this is a macro to allow us to override the condition if we wish,
- *     e.g. to force an error during stress testing.
+ *  Like the other macros, this is a macro to allow us to override the condition if we wish,
+ *  e.g. to force an error during stress testing.
  */
 #define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
 
  */
 #define MALI_IGNORE(x) x=x
 
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
 #define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
 
 #define MALI_PRINT_ERROR(args) do{ \
-       MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
-       MALI_PRINTF(("           %s()%4d\n           ", __FUNCTION__, __LINE__)) ; \
-       MALI_PRINTF(args); \
-       MALI_PRINTF(("\n"));   \
-       MTKMALI_DumpRegister();\
+               MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+               MALI_PRINTF(("           %s()%4d\n           ", __FUNCTION__, __LINE__)) ; \
+               MALI_PRINTF(args); \
+               MALI_PRINTF(("\n")); \
        } while(0)
 
 #define MALI_PRINT(args) do{ \
-       MALI_PRINTF(("Mali: ")); \
-       MALI_PRINTF(args); \
+               MALI_PRINTF(("Mali: ")); \
+               MALI_PRINTF(args); \
        } while (0)
 
 #ifdef DEBUG
@@ -128,19 +132,19 @@ extern int mali_debug_level;
 
 #define MALI_DEBUG_CODE(code) code
 #define MALI_DEBUG_PRINT(level, args)  do { \
-       if((level) <=  mali_debug_level)\
-        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+               if((level) <=  mali_debug_level)\
+               {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
        } while (0)
 
 #define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
 
 #define MALI_DEBUG_PRINT_IF(level,condition,args)  \
        if((condition)&&((level) <=  mali_debug_level))\
-        {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+       {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
 
 #define MALI_DEBUG_PRINT_ELSE(level, args)\
        else if((level) <=  mali_debug_level)\
-    { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+       { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
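
A usage sketch of the level-gated printing (hypothetical values; the double parentheses forward a printf-style argument list through MALI_PRINTF):

/* Hypothetical sketch: messages print only when their level is at or
 * below the runtime mali_debug_level. */
static void example_report(u32 core_id, u32 status)
{
	MALI_DEBUG_PRINT(3, ("core %u: status=0x%08X\n", core_id, status));
	MALI_DEBUG_PRINT_IF(4, 0 != status, ("core %u is busy\n", core_id));
}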
 
 /**
  * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
index 38b788485e69292792ea5a7e0341d35ec31f7df3..36683be3c5480a85c99d2e887b12e3a0aa82c9f0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -22,8 +22,7 @@
 #include "mali_broadcast.h"
 #include "mali_gp.h"
 #include "mali_pp.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
 #include "mali_pp_job.h"
 #include "mali_group.h"
 #include "mali_pm.h"
@@ -31,7 +30,6 @@
 #include "mali_scheduler.h"
 #include "mali_kernel_utilization.h"
 #include "mali_l2_cache.h"
-#include "mali_dma.h"
 #include "mali_timeline.h"
 #include "mali_soft_job.h"
 #include "mali_pm_domain.h"
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
 #include "mali_profiling_internal.h"
 #endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
 
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
 
 /* Mali GPU memory. Real values come from module parameter or from device specific data */
 unsigned int mali_dedicated_mem_start = 0;
 unsigned int mali_dedicated_mem_size = 0;
-unsigned int mali_shared_mem_size = 0;
+
+/* Default shared memory size is set to 4G (0xffffffff, the u32 maximum). */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
 
 /* Frame buffer memory to be accessible by Mali GPU */
 int mali_fb_start = 0;
@@ -66,20 +69,23 @@ int mali_inited_pp_cores_group_1 = 0;
 int mali_inited_pp_cores_group_2 = 0;
 
 static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
-static u32 global_gpu_base_address = 0;
+static uintptr_t global_gpu_base_address = 0;
 static u32 global_gpu_major_version = 0;
 static u32 global_gpu_minor_version = 0;
 
 mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+mali_bool mali_gpu_class_is_mali470 = MALI_FALSE;
 
 static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
 {
+       _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
        global_gpu_base_address = _mali_osk_resource_base_address();
        if (0 == global_gpu_base_address) {
-               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+               err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
        }
 
-       return _MALI_OSK_ERR_OK;
+       return err;
 }
 
 static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
@@ -112,31 +118,19 @@ static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
 
 static _mali_osk_errcode_t mali_parse_product_info(void)
 {
-       /*
-        * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
-        * Look at the version register for the first PP core in order to determine the GPU HW revision.
-        */
-
-       u32 first_pp_offset;
        _mali_osk_resource_t first_pp_resource;
 
-       /* Find out where the first PP core is located */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
-               /* Mali-300/400/450 */
-               first_pp_offset = 0x8000;
-       } else {
-               /* Mali-200 */
-               first_pp_offset = 0x0000;
-       }
-
        /* Find the first PP core resource (again) */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
                /* Create a dummy PP object for this core so that we can read the version register */
-               struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+               struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
                if (NULL != group) {
                        struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
                        if (NULL != pp_core) {
-                               u32 pp_version = mali_pp_core_get_version(pp_core);
+                               u32 pp_version;
+
+                               pp_version = mali_pp_core_get_version(pp_core);
+
                                mali_group_delete(group);
 
                                global_gpu_major_version = (pp_version >> 8) & 0xFF;
@@ -161,6 +155,10 @@ static _mali_osk_errcode_t mali_parse_product_info(void)
                                        global_product_id = _MALI_PRODUCT_ID_MALI450;
                                        MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
                                        break;
+                               case MALI470_PP_PRODUCT_ID:
+                                       global_product_id = _MALI_PRODUCT_ID_MALI470;
+                                       MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+                                       break;
                                default:
                                        MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
                                        return _MALI_OSK_ERR_FAULT;
@@ -180,48 +178,6 @@ static _mali_osk_errcode_t mali_parse_product_info(void)
        return _MALI_OSK_ERR_FAULT;
 }
 
-
-static void mali_resource_count(u32 *pp_count, u32 *l2_count)
-{
-       *pp_count = 0;
-       *l2_count = 0;
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
-               ++(*pp_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
-               ++(*pp_count);
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
-               ++(*l2_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
-               ++(*l2_count);
-       }
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
-               ++(*l2_count);
-       }
-}
-
 static void mali_delete_groups(void)
 {
        struct mali_group *group;
@@ -248,7 +204,7 @@ static void mali_delete_l2_cache_cores(void)
        MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
 }
 
-static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
 {
        struct mali_l2_cache_core *l2_cache = NULL;
 
@@ -256,7 +212,7 @@ static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t
 
                MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
 
-               l2_cache = mali_l2_cache_create(resource);
+               l2_cache = mali_l2_cache_create(resource, domain_index);
                if (NULL == l2_cache) {
                        MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
                        return NULL;
@@ -273,16 +229,15 @@ static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
 
        if (mali_is_mali400()) {
                _mali_osk_resource_t l2_resource;
-               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+               if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
                        MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
                        return _MALI_OSK_ERR_FAULT;
                }
 
-               l2_cache = mali_create_l2_cache_core(&l2_resource);
+               l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
                if (NULL == l2_cache) {
                        return _MALI_OSK_ERR_FAULT;
                }
-               mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
        } else if (mali_is_mali450()) {
                /*
                 * L2 for GP    at 0x10000
@@ -295,39 +250,50 @@ static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
                _mali_osk_resource_t l2_pp_grp1_resource;
 
                /* Make cluster for GP's L2 */
-               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
                        MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
-                       l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+                       l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
                        if (NULL == l2_cache) {
                                return _MALI_OSK_ERR_FAULT;
                        }
-                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
                } else {
                        MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
                        return _MALI_OSK_ERR_FAULT;
                }
 
                /* Find corresponding l2 domain */
-               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
                        MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
-                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
                        if (NULL == l2_cache) {
                                return _MALI_OSK_ERR_FAULT;
                        }
-                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
                } else {
                        MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
                        return _MALI_OSK_ERR_FAULT;
                }
 
                /* Second PP core group is optional, don't fail if we don't find it */
-               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
                        MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
-                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+                       l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
                        if (NULL == l2_cache) {
                                return _MALI_OSK_ERR_FAULT;
                        }
-                       mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
+               }
+       } else if (mali_is_mali470()) {
+               _mali_osk_resource_t l2c1_resource;
+
+               /* Make cluster for L2C1 */
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) {
+                       MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n"));
+                       l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21);
+                       if (NULL == l2_cache) {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+               } else {
+                       MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n"));
+                       return _MALI_OSK_ERR_FAULT;
                }
        }
 
@@ -335,9 +301,10 @@ static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
 }
 
 static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
-        _mali_osk_resource_t *resource_mmu,
-        _mali_osk_resource_t *resource_gp,
-        _mali_osk_resource_t *resource_pp)
+               _mali_osk_resource_t *resource_mmu,
+               _mali_osk_resource_t *resource_gp,
+               _mali_osk_resource_t *resource_pp,
+               u32 domain_index)
 {
        struct mali_mmu_core *mmu;
        struct mali_group *group;
@@ -345,7 +312,7 @@ static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
        MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
 
        /* Create the group object */
-       group = mali_group_create(cache, NULL, NULL);
+       group = mali_group_create(cache, NULL, NULL, domain_index);
        if (NULL == group) {
                MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
                return NULL;
@@ -383,18 +350,13 @@ static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
                }
        }
 
-       /* Reset group */
-       mali_group_lock(group);
-       mali_group_reset(group);
-       mali_group_unlock(group);
-
        return group;
 }
 
 static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
-        _mali_osk_resource_t *resource_pp_bcast,
-        _mali_osk_resource_t *resource_dlbu,
-        _mali_osk_resource_t *resource_bcast)
+               _mali_osk_resource_t *resource_pp_bcast,
+               _mali_osk_resource_t *resource_dlbu,
+               _mali_osk_resource_t *resource_bcast)
 {
        struct mali_mmu_core *mmu_pp_bcast_core;
        struct mali_pp_core *pp_bcast_core;
@@ -420,7 +382,34 @@ static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resou
        }
 
        /* Create the group object */
-       group = mali_group_create(NULL, dlbu_core, bcast_core);
+#if defined(DEBUG)
+       /* Get a physical PP group to temporarily add to broadcast unit.  IRQ
+        * verification needs a physical group in the broadcast unit to test
+        * the broadcast unit interrupt line. */
+       {
+               struct mali_group *phys_group = NULL;
+               int i;
+               for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+                       phys_group = mali_group_get_glob_group(i);
+                       if (NULL != mali_group_get_pp_core(phys_group)) break;
+               }
+               MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+               /* Add the group temporarily to the broadcast, and update the
+                * broadcast HW. Since the HW is not updated when removing the
+                * group the IRQ check will work when the virtual PP is created
+                * later.
+                *
+                * When the virtual group gets populated, the actually used
+                * groups will be added to the broadcast unit and the HW will
+                * be updated.
+                */
+               mali_bcast_add_group(bcast_core, phys_group);
+               mali_bcast_reset(bcast_core);
+               mali_bcast_remove_group(bcast_core, phys_group);
+       }
+#endif /* DEBUG */
+       group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
        if (NULL == group) {
                MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
                mali_bcast_unit_delete(bcast_core);
@@ -473,14 +462,14 @@ static _mali_osk_errcode_t mali_parse_config_groups(void)
        _mali_osk_errcode_t resource_dlbu_found;
        _mali_osk_errcode_t resource_bcast_found;
 
-       if (!(mali_is_mali400() || mali_is_mali450())) {
+       if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) {
                /* No known HW core */
                return _MALI_OSK_ERR_FAULT;
        }
 
        if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
                /* Group settings are not overridden by module parameters, so use device settings */
-               struct _mali_osk_device_data data = { 0, };
+               _mali_osk_device_data data = { 0, };
 
                if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
                        /* Use device specific settings (if defined) */
@@ -496,37 +485,37 @@ static _mali_osk_errcode_t mali_parse_config_groups(void)
                cluster_id_pp_grp1 = 2;
        }
 
-       resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
-       resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
-       resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
-       resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
-       resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
-       resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
-       resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
-       resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
-       resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
-       resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
-       resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
-       resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
-       resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
-       resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
-       resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
-       resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
-       resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
-       resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
-
-
-       if (mali_is_mali450()) {
-               resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
-               resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
-               resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
-               resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+       resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+       resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+       resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+       resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+       resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+       resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+       resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+       resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+       resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+       resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+       resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+       resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+       resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+       resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+       resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+       resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+       resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+       resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
+
+
+       if (mali_is_mali450() || mali_is_mali470()) {
+               resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+               resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+               resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+               resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
 
                if (_MALI_OSK_ERR_OK != resource_bcast_found ||
                    _MALI_OSK_ERR_OK != resource_dlbu_found ||
                    _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
                    _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
-                       /* Missing mandatory core(s) for Mali-450 */
+                       /* Missing mandatory core(s) for Mali-450 or Mali-470 */
                        MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
                        return _MALI_OSK_ERR_FAULT;
                }
@@ -542,37 +531,29 @@ static _mali_osk_errcode_t mali_parse_config_groups(void)
        }
 
        MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
-       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
        if (NULL == group) {
                return _MALI_OSK_ERR_FAULT;
        }
 
-       /* Add GP in group, for PMU ref count */
-       mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
-
        /* Create group for first (and mandatory) PP core */
        MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
-       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+       group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
        if (NULL == group) {
                return _MALI_OSK_ERR_FAULT;
        }
 
-       /* Find corresponding pp domain */
-       mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
-
        mali_inited_pp_cores_group_1++;
 
        /* Create groups for rest of the cores in the first PP core group */
        for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
                if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
                        if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
-                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
                                if (NULL == group) {
                                        return _MALI_OSK_ERR_FAULT;
                                }
 
-                               mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
-
                                mali_inited_pp_cores_group_1++;
                        }
                }
@@ -583,17 +564,17 @@ static _mali_osk_errcode_t mali_parse_config_groups(void)
                if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
                        if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
                                MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
-                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+                               group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
                                if (NULL == group) {
                                        return _MALI_OSK_ERR_FAULT;
                                }
-                               mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
                                mali_inited_pp_cores_group_2++;
                        }
                }
        }
 
-       if(mali_is_mali450()) {
+       if (mali_is_mali450() || mali_is_mali470()) {
                _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
                if (_MALI_OSK_ERR_OK != err) {
                        return err;
@@ -620,157 +601,15 @@ static _mali_osk_errcode_t mali_check_shared_interrupts(void)
        return _MALI_OSK_ERR_OK;
 }
 
-static _mali_osk_errcode_t mali_create_pm_domains(void)
-{
-       int i;
-
-       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
-               if (0x0 == mali_pmu_get_domain_mask(i)) continue;
-
-               if (NULL == mali_pm_domain_create(mali_pmu_get_domain_mask(i))) {
-                       return _MALI_OSK_ERR_NOMEM;
-               }
-       }
-
-       return _MALI_OSK_ERR_OK;
-}
-
-static void mali_use_default_pm_domain_config(void)
-{
-       u32 pp_count_gr1 = 0;
-       u32 pp_count_gr2 = 0;
-       u32 l2_count = 0;
-
-       MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
-
-       /* GP core */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
-               mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
-       }
-
-       /* PP0 - PP3 core */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
-               ++pp_count_gr1;
-
-               if (mali_is_mali400()) {
-                       mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01<<2);
-               } else if (mali_is_mali450()) {
-                       mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01<<1);
-               }
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
-               ++pp_count_gr1;
-
-               if (mali_is_mali400()) {
-                       mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01<<3);
-               } else if (mali_is_mali450()) {
-                       mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01<<2);
-               }
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
-               ++pp_count_gr1;
-
-               if (mali_is_mali400()) {
-                       mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01<<4);
-               } else if (mali_is_mali450()) {
-                       mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01<<2);
-               }
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
-               ++pp_count_gr1;
-
-               if (mali_is_mali400()) {
-                       mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01<<5);
-               } else if (mali_is_mali450()) {
-                       mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01<<2);
-               }
-       }
-
-       /* PP4 - PP7 */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
-               ++pp_count_gr2;
-
-               mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01<<3);
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
-               ++pp_count_gr2;
-
-               mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01<<3);
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
-               ++pp_count_gr2;
-
-               mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01<<3);
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
-               ++pp_count_gr2;
-
-               mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01<<3);
-       }
-
-       /* L2gp/L2PP0/L2PP4 */
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
-               ++l2_count;
-
-               if (mali_is_mali400()) {
-                       mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01<<1);
-               } else if (mali_is_mali450()) {
-                       mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01<<0);
-               }
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
-               ++l2_count;
-
-               mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01<<1);
-       }
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
-               ++l2_count;
-
-               mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01<<3);
-       }
-
-       MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
-}
-
-static void mali_set_pmu_global_domain_config(void)
-{
-       struct _mali_osk_device_data data = { 0, };
-       int i = 0;
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
-               /* Check whether has customized pmu domain configure */
-               for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
-                       if (0 != data.pmu_domain_config[i]) break;
-               }
-
-               if (MALI_MAX_NUMBER_OF_DOMAINS == i) {
-                       mali_use_default_pm_domain_config();
-               } else {
-                       /* Copy the customer config to global config */
-                       mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
-               }
-       }
-}
-
 static _mali_osk_errcode_t mali_parse_config_pmu(void)
 {
        _mali_osk_resource_t resource_pmu;
 
        MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
 
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
                struct mali_pmu_core *pmu;
 
-               mali_set_pmu_global_domain_config();
-
                pmu = mali_pmu_create(&resource_pmu);
                if (NULL == pmu) {
                        MALI_PRINT_ERROR(("Failed to create PMU\n"));
@@ -782,48 +621,35 @@ static _mali_osk_errcode_t mali_parse_config_pmu(void)
        return _MALI_OSK_ERR_OK;
 }
 
-static _mali_osk_errcode_t mali_parse_config_dma(void)
-{
-       _mali_osk_resource_t resource_dma;
-
-       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
-               if (NULL == mali_dma_create(&resource_dma)) {
-                       return _MALI_OSK_ERR_FAULT;
-               }
-               return _MALI_OSK_ERR_OK;
-       } else {
-               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
-       }
-}
-
 static _mali_osk_errcode_t mali_parse_config_memory(void)
 {
+       _mali_osk_device_data data = { 0, };
        _mali_osk_errcode_t ret;
 
-       if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+       /* Priority when setting mali_shared_mem_size,
+        * mali_dedicated_mem_start and mali_dedicated_mem_size:
+        * 1. module parameter;
+        * 2. platform data;
+        * 3. default value.
+        */
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
                /* Memory settings are not overridden by module parameters, so use device settings */
-               struct _mali_osk_device_data data = { 0, };
-
-               if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
                        /* Use device specific settings (if defined) */
                        mali_dedicated_mem_start = data.dedicated_mem_start;
                        mali_dedicated_mem_size = data.dedicated_mem_size;
-                       mali_shared_mem_size = data.shared_mem_size;
                }
 
-               if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
-                       /* No GPU memory specified */
-                       return _MALI_OSK_ERR_INVALID_ARGS;
+               if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+                   0 != data.shared_mem_size) {
+                       mali_shared_mem_size = data.shared_mem_size;
                }
-
-               MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
-                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
-       } else {
-               MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
-                                    mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
        }
 
        if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+               MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+                                    mali_dedicated_mem_size, mali_dedicated_mem_start));
+
                /* Dedicated memory */
                ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
                if (_MALI_OSK_ERR_OK != ret) {
@@ -834,6 +660,8 @@ static _mali_osk_errcode_t mali_parse_config_memory(void)
        }
 
        if (0 < mali_shared_mem_size) {
+               MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
                /* Shared OS memory */
                ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
                if (_MALI_OSK_ERR_OK != ret) {
@@ -845,7 +673,7 @@ static _mali_osk_errcode_t mali_parse_config_memory(void)
 
        if (0 == mali_fb_start && 0 == mali_fb_size) {
                /* Frame buffer settings are not overridden by module parameters, so use device settings */
-               struct _mali_osk_device_data data = { 0, };
+               _mali_osk_device_data data = { 0, };
 
                if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
                        /* Use device specific settings (if defined) */
@@ -854,10 +682,10 @@ static _mali_osk_errcode_t mali_parse_config_memory(void)
                }
 
                MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
-                                    mali_fb_size, mali_fb_start));
+                                    mali_fb_size, mali_fb_start));
        } else {
                MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
-                                    mali_fb_size, mali_fb_start));
+                                    mali_fb_size, mali_fb_start));
        }
 
        if (0 != mali_fb_size) {
@@ -875,24 +703,57 @@ static _mali_osk_errcode_t mali_parse_config_memory(void)
 
 static void mali_detect_gpu_class(void)
 {
-       u32 number_of_pp_cores = 0;
-       u32 number_of_l2_caches = 0;
-
-       mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
-       if (number_of_l2_caches > 1) {
+       if (_mali_osk_identify_gpu_resource() == 0x450)
                mali_gpu_class_is_mali450 = MALI_TRUE;
+       else if (_mali_osk_identify_gpu_resource() == 0x470)
+               mali_gpu_class_is_mali470 = MALI_TRUE;
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       _mali_osk_resource_t resource_bcast;
+
+       /* Ensure broadcast unit is in a good state before we start creating
+        * groups and cores.
+        */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
+               struct mali_bcast_unit *bcast_core;
+
+               bcast_core = mali_bcast_unit_create(&resource_bcast);
+               if (NULL == bcast_core) {
+                       MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+                       return _MALI_OSK_ERR_FAULT;
+               }
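+               /* The create call itself brings the broadcast HW back to a
+                * known state; the temporary object is deleted again, since
+                * the virtual group creates its own instance later.
+                */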
+               mali_bcast_unit_delete(bcast_core);
        }
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+
+       return _MALI_OSK_ERR_OK;
 }
 
 _mali_osk_errcode_t mali_initialize_subsystems(void)
 {
        _mali_osk_errcode_t err;
-       struct mali_pmu_core *pmu;
+
+#ifdef CONFIG_MALI_DT
+       err = _mali_osk_resource_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+#endif
 
        mali_pp_job_initialize();
 
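+       /* Initialize the timeline system used for job dependency and fence tracking */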
+       mali_timeline_initialize();
+
        err = mali_session_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
 #if defined(CONFIG_MALI400_PROFILING)
        err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
@@ -903,194 +764,187 @@ _mali_osk_errcode_t mali_initialize_subsystems(void)
 #endif
 
        err = mali_memory_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
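+       /* Initialize the executor, which owns the GP/PP groups and runs jobs on them */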
+       err = mali_executor_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
+
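+       /* Initialize the scheduler, which queues jobs for the executor */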
+       err = mali_scheduler_initialize();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+       /* Configure memory early, needed by mali_mmu_initialize. */
        err = mali_parse_config_memory();
-       if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
        err = mali_set_global_gpu_base_address();
-       if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       /* Detect gpu class according to l2 cache number */
+       /* Detect GPU class from the GPU resource identifier */
        mali_detect_gpu_class();
 
        err = mali_check_shared_interrupts();
-       if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       err = mali_pp_scheduler_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+       /* Initialize the MALI PMU (will not touch HW!) */
+       err = mali_parse_config_pmu();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
        /* Initialize the power management module */
        err = mali_pm_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       /* Initialize the MALI PMU */
-       err = mali_parse_config_pmu();
-       if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
-
-       /* Make sure the power stays on for the rest of this function */
-       err = _mali_osk_pm_dev_ref_add();
-       if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
-
-       /*
-        * If run-time PM is used, then the mali_pm module has now already been
-        * notified that the power now is on (through the resume callback functions).
-        * However, if run-time PM is not used, then there will probably not be any
-        * calls to the resume callback functions, so we need to explicitly tell it
-        * that the power is on.
-        */
-       mali_pm_set_power_is_on();
+       /* Make sure the entire GPU stays on for the rest of this function */
+       mali_pm_init_begin();
 
-       /* Reset PMU HW and ensure all Mali power domains are on */
-       pmu = mali_pmu_get_global_pmu_core();
-       if (NULL != pmu) {
-               err = mali_pmu_reset(pmu);
-               if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+       /* Ensure HW is in a good state before starting to access cores. */
+       err = mali_init_hw_reset();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_terminate_subsystems();
+               return err;
        }
 
        /* Detect which Mali GPU we are dealing with */
        err = mali_parse_product_info();
-       if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
 
        /* The global_product_id is now populated with the correct Mali GPU */
 
-       /* Create PM domains only if PMU exists */
-       if (NULL != pmu) {
-               err = mali_create_pm_domains();
-               if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
-       }
+       /* Start configuring the actual Mali hardware. */
 
-       /* Initialize MMU module */
        err = mali_mmu_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       if (mali_is_mali450()) {
+       if (mali_is_mali450() || mali_is_mali470()) {
                err = mali_dlbu_initialize();
-               if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
-
-               err = mali_parse_config_dma();
-               if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+               if (_MALI_OSK_ERR_OK != err) {
+                       mali_pm_init_end();
+                       mali_terminate_subsystems();
+                       return err;
+               }
        }
 
-       /* Start configuring the actual Mali hardware. */
        err = mali_parse_config_l2_cache();
-       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
+
        err = mali_parse_config_groups();
-       if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
 
-       /* Initialize the schedulers */
-       err = mali_scheduler_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
-       err = mali_gp_scheduler_initialize();
-       if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+       /* Move groups into executor */
+       mali_executor_populate();
 
-       /* PP scheduler population can't fail */
-       mali_pp_scheduler_populate();
+       /* Must be called after every group has been assigned a PM domain */
+       mali_pm_power_cost_setup();
+
+       /* Initialize the GPU timer */
+       err = mali_control_timer_init();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
+       }
 
        /* Initialize the GPU utilization tracking */
        err = mali_utilization_init();
-       if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
-
-       /* Allowing the system to be turned off */
-       _mali_osk_pm_dev_ref_dec();
-
-       MALI_SUCCESS; /* all ok */
-
-       /* Error handling */
-
-utilization_init_failed:
-       mali_pp_scheduler_depopulate();
-       mali_gp_scheduler_terminate();
-gp_scheduler_init_failed:
-       mali_scheduler_terminate();
-scheduler_init_failed:
-config_parsing_failed:
-       mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
-       mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
-       {
-               struct mali_dma_core *dma = mali_dma_get_global_dma_core();
-               if (NULL != dma) mali_dma_delete(dma);
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
        }
-dma_parsing_failed:
-       mali_dlbu_terminate();
-dlbu_init_failed:
-       mali_mmu_terminate();
-mmu_init_failed:
-       mali_pm_domain_terminate();
-pm_domain_failed:
-       /* Nothing to roll back */
-product_info_parsing_failed:
-       /* Nothing to roll back */
-pmu_reset_failed:
-       /* Allowing the system to be turned off */
-       _mali_osk_pm_dev_ref_dec();
-pm_always_on_failed:
-       pmu = mali_pmu_get_global_pmu_core();
-       if (NULL != pmu) {
-               mali_pmu_delete(pmu);
+
+#if defined(CONFIG_MALI_DVFS)
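+       /* Initialize the in-kernel DVFS policy, driven by the GPU utilization data */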
+       err = mali_dvfs_policy_init();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_init_end();
+               mali_terminate_subsystems();
+               return err;
        }
-parse_pmu_config_failed:
-       mali_pm_terminate();
-pm_init_failed:
-       mali_pp_scheduler_terminate();
-pp_scheduler_init_failed:
-check_shared_interrupts_failed:
-       global_gpu_base_address = 0;
-set_global_gpu_base_address_failed:
-       /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
-parse_memory_config_failed:
-       mali_memory_terminate();
-memory_init_failed:
-#if defined(CONFIG_MALI400_PROFILING)
-       _mali_osk_profiling_term();
 #endif
-       mali_session_terminate();
-session_init_failed:
-       mali_pp_job_terminate();
-       return err;
+
+       /* Allowing the system to be turned off */
+       mali_pm_init_end();
+
+       return _MALI_OSK_ERR_OK; /* all ok */
 }
 
 void mali_terminate_subsystems(void)
 {
        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-       struct mali_dma_core *dma = mali_dma_get_global_dma_core();
 
        MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
 
-       /* shut down subsystems in reverse order from startup */
+       mali_utilization_term();
+       mali_control_timer_term();
 
-       /* We need the GPU to be powered up for the terminate sequence */
-       _mali_osk_pm_dev_ref_add();
+       mali_executor_depopulate();
+       mali_delete_groups(); /* Delete groups not added to executor */
+       mali_executor_terminate();
 
-       mali_utilization_term();
-       mali_pp_scheduler_depopulate();
-       mali_gp_scheduler_terminate();
        mali_scheduler_terminate();
+       mali_pp_job_terminate();
        mali_delete_l2_cache_cores();
-       if (mali_is_mali450()) {
+       mali_mmu_terminate();
+
+       if (mali_is_mali450() || mali_is_mali470()) {
                mali_dlbu_terminate();
        }
-       mali_mmu_terminate();
+
+       mali_pm_terminate();
+
        if (NULL != pmu) {
                mali_pmu_delete(pmu);
        }
-       if (NULL != dma) {
-               mali_dma_delete(dma);
-       }
-       mali_pm_terminate();
-       mali_memory_terminate();
+
 #if defined(CONFIG_MALI400_PROFILING)
        _mali_osk_profiling_term();
 #endif
 
-       /* Allowing the system to be turned off */
-       _mali_osk_pm_dev_ref_dec();
+       mali_memory_terminate();
 
-       mali_pp_scheduler_terminate();
        mali_session_terminate();
 
-       mali_pp_job_terminate();
+       mali_timeline_terminate();
+
+       global_gpu_base_address = 0;
 }
 
 _mali_product_id_t mali_kernel_core_get_product_id(void)
@@ -1108,13 +962,13 @@ u32 mali_kernel_core_get_gpu_minor_version(void)
        return global_gpu_minor_version;
 }
 
-_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
 {
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
        /* check compatibility */
-       if ( args->version == _MALI_UK_API_VERSION ) {
+       if (args->version == _MALI_UK_API_VERSION) {
                args->compatible = 1;
        } else {
                args->compatible = 0;
@@ -1126,23 +980,43 @@ _mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args
        MALI_SUCCESS;
 }
 
-_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
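+/* v2 of the API version query: same compatibility check as above, but
+ * returns the error code directly instead of using MALI_SUCCESS. */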
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       /* check compatibility */
+       if (args->version == _MALI_UK_API_VERSION) {
+               args->compatible = 1;
+       } else {
+               args->compatible = 0;
+       }
+
+       args->version = _MALI_UK_API_VERSION; /* report our version */
+
+       /* success regardless of being compatible or not */
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
 {
        _mali_osk_errcode_t err;
-       _mali_osk_notification_t * notification;
+       _mali_osk_notification_t *notification;
        _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
 
        /* check input */
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
 
        /* if the queue does not exist we're currently shutting down */
        if (NULL == queue) {
                MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
                args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
-               MALI_SUCCESS;
+               return _MALI_OSK_ERR_OK;
        }
 
        /* receive a notification, might sleep */
@@ -1156,54 +1030,56 @@ _mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notificat
        _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
 
        /* finished with the notification */
-       _mali_osk_notification_delete( notification );
+       _mali_osk_notification_delete(notification);
 
-       MALI_SUCCESS; /* all ok */
+       return _MALI_OSK_ERR_OK; /* all ok */
 }
 
-_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
 {
-       _mali_osk_notification_t * notification;
+       _mali_osk_notification_t *notification;
        _mali_osk_notification_queue_t *queue;
+       struct mali_session_data *session;
 
        /* check input */
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       queue = session->ioctl_queue;
 
        /* if the queue does not exist we're currently shutting down */
        if (NULL == queue) {
                MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
-               MALI_SUCCESS;
+               return _MALI_OSK_ERR_OK;
        }
 
        notification = _mali_osk_notification_create(args->type, 0);
        if (NULL == notification) {
-               MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+               MALI_PRINT_ERROR(("Failed to create notification object\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
 
        _mali_osk_notification_queue_send(queue, notification);
 
-       MALI_SUCCESS; /* all ok */
+       return _MALI_OSK_ERR_OK; /* all ok */
 }
 
-_mali_osk_errcode_t _mali_ukk_request_high_priority( _mali_uk_request_high_priority_s *args )
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
 {
        struct mali_session_data *session;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
 
-       session = (struct mali_session_data *) args->ctx;
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
 
        if (!session->use_high_priority_job_queue) {
                session->use_high_priority_job_queue = MALI_TRUE;
                MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
        }
 
-       MALI_SUCCESS;
+       return _MALI_OSK_ERR_OK;
 }
 
 _mali_osk_errcode_t _mali_ukk_open(void **context)
@@ -1233,6 +1109,7 @@ _mali_osk_errcode_t _mali_ukk_open(void **context)
 
        if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
                MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+               mali_mmu_pagedir_free(session->page_directory);
                _mali_osk_notification_queue_term(session->ioctl_queue);
                _mali_osk_free(session);
                MALI_ERROR(_MALI_OSK_ERR_NOMEM);
@@ -1240,10 +1117,11 @@ _mali_osk_errcode_t _mali_ukk_open(void **context)
 
        if (0 != mali_dlbu_phys_addr) {
                mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
-                                       _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+                                       _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
        }
 
        if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+               mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
                mali_mmu_pagedir_free(session->page_directory);
                _mali_osk_notification_queue_term(session->ioctl_queue);
                _mali_osk_free(session);
@@ -1254,6 +1132,7 @@ _mali_osk_errcode_t _mali_ukk_open(void **context)
        session->soft_job_system = mali_soft_job_system_create(session);
        if (NULL == session->soft_job_system) {
                mali_memory_session_end(session);
+               mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
                mali_mmu_pagedir_free(session->page_directory);
                _mali_osk_notification_queue_term(session->ioctl_queue);
                _mali_osk_free(session);
@@ -1265,23 +1144,15 @@ _mali_osk_errcode_t _mali_ukk_open(void **context)
        if (NULL == session->timeline_system) {
                mali_soft_job_system_destroy(session->soft_job_system);
                mali_memory_session_end(session);
+               mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
                mali_mmu_pagedir_free(session->page_directory);
                _mali_osk_notification_queue_term(session->ioctl_queue);
                _mali_osk_free(session);
                MALI_ERROR(_MALI_OSK_ERR_NOMEM);
        }
 
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
-               MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
-               mali_timeline_system_destroy(session->timeline_system);
-               mali_soft_job_system_destroy(session->soft_job_system);
-               mali_memory_session_end(session);
-               mali_mmu_pagedir_free(session->page_directory);
-               _mali_osk_notification_queue_term(session->ioctl_queue);
-               _mali_osk_free(session);
-               return _MALI_OSK_ERR_FAULT;
-       }
+#if defined(CONFIG_MALI_DVFS)
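+       /* Per-session window job counter consumed by the DVFS policy */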
+       _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
 #endif
 
        session->use_high_priority_job_queue = MALI_FALSE;
@@ -1294,15 +1165,30 @@ _mali_osk_errcode_t _mali_ukk_open(void **context)
                _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
        }
 
-       *context = (void*)session;
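+       /* Record the owning process and reset per-session memory usage accounting */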
+       session->pid = _mali_osk_get_pid();
+       session->comm = _mali_osk_get_comm();
+       session->max_mali_mem_allocated_size = 0;
+       for (i = 0; i < MALI_MEM_TYPE_MAX; i++) {
+               atomic_set(&session->mali_mem_array[i], 0);
+       }
+       atomic_set(&session->mali_mem_allocated_pages, 0);
+       *context = (void *)session;
 
        /* Add session to the list of all sessions. */
        mali_session_add(session);
 
-       MALI_DEBUG_PRINT(2, ("Session started\n"));
-       MALI_SUCCESS;
+       MALI_DEBUG_PRINT(3, ("Session started\n"));
+       return _MALI_OSK_ERR_OK;
 }
 
+#if defined(DEBUG)
+/* PM statistics counters used for debugging; dumped and reset when a session closes */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
 _mali_osk_errcode_t _mali_ukk_close(void **context)
 {
        struct mali_session_data *session;
@@ -1323,9 +1209,11 @@ _mali_osk_errcode_t _mali_ukk_close(void **context)
        /* Stop the soft job timer. */
        mali_timeline_system_stop_timer(session->timeline_system);
 
-       /* Abort queued and running GP and PP jobs. */
-       mali_gp_scheduler_abort_session(session);
-       mali_pp_scheduler_abort_session(session);
+       /* Abort queued jobs */
+       mali_scheduler_abort_session(session);
+
+       /* Abort executing jobs */
+       mali_executor_abort_session(session);
 
        /* Abort the soft job system. */
        mali_soft_job_system_abort(session->soft_job_system);
@@ -1349,16 +1237,20 @@ _mali_osk_errcode_t _mali_ukk_close(void **context)
         * session has been completed, before we free internal data structures.
         */
        _mali_osk_wq_flush();
+#if 0
+       /* Disabled (Robin): destroying the timeline system here caused boot
+        * failures and potential memory corruption.
+        */
 
        /* Destroy timeline system. */
        mali_timeline_system_destroy(session->timeline_system);
        session->timeline_system = NULL;
+#endif
 
        /* Destroy soft system. */
        mali_soft_job_system_destroy(session->soft_job_system);
        session->soft_job_system = NULL;
 
-       MALI_DEBUG_CODE( {
+       MALI_DEBUG_CODE({
                /* Check that the pp_job_fb_lookup_list array is empty. */
                u32 i;
                for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
@@ -1370,29 +1262,44 @@ _mali_osk_errcode_t _mali_ukk_close(void **context)
        /* Free remaining memory allocated to this session */
        mali_memory_session_end(session);
 
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
        _mali_osk_atomic_term(&session->number_of_window_jobs);
 #endif
 
+#if defined(CONFIG_MALI400_PROFILING)
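+       /* Stop any profiling sampling still active for this process */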
+       _mali_osk_profiling_stop_sampling(session->pid);
+#endif
+
        /* Free session data structures */
+       mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
        mali_mmu_pagedir_free(session->page_directory);
        _mali_osk_notification_queue_term(session->ioctl_queue);
        _mali_osk_free(session);
 
        *context = NULL;
 
-       MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+       MALI_DEBUG_PRINT(3, ("Session has ended\n"));
 
-       MALI_SUCCESS;
+#if defined(DEBUG)
+       MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+       MALI_DEBUG_PRINT(3, ("       # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+       num_pm_runtime_resume = 0;
+       num_pm_updates = 0;
+       num_pm_updates_up = 0;
+       num_pm_updates_down = 0;
+#endif
+
+       return _MALI_OSK_ERR_OK;
 }
 
 #if MALI_STATE_TRACKING
-u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
 {
        int n = 0; /* Number of bytes written to buf */
 
-       n += mali_gp_scheduler_dump_state(buf + n, size - n);
-       n += mali_pp_scheduler_dump_state(buf + n, size - n);
+       n += mali_scheduler_dump_state(buf + n, size - n);
+       n += mali_executor_dump_state(buf + n, size - n);
 
        return n;
 }
index f0c0b0f69d7035b863e5b66a64a8cda7a4142d47..a5a40c7f926b12971935f396e7c071db34330682 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -19,9 +19,11 @@ typedef enum {
        _MALI_PRODUCT_ID_MALI300,
        _MALI_PRODUCT_ID_MALI400,
        _MALI_PRODUCT_ID_MALI450,
+       _MALI_PRODUCT_ID_MALI470,
 } _mali_product_id_t;
 
 extern mali_bool mali_gpu_class_is_mali450;
+extern mali_bool mali_gpu_class_is_mali470;
 
 _mali_osk_errcode_t mali_initialize_subsystems(void);
 
@@ -33,24 +35,23 @@ u32 mali_kernel_core_get_gpu_major_version(void);
 
 u32 mali_kernel_core_get_gpu_minor_version(void);
 
-u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+MALI_STATIC_INLINE mali_bool mali_is_mali470(void)
+{
+       return mali_gpu_class_is_mali470;
+}
 
 MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
 {
-#if defined(CONFIG_MALI450)
        return mali_gpu_class_is_mali450;
-#else
-       return MALI_FALSE;
-#endif
 }
 
 MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
 {
-#if !defined(CONFIG_MALI450)
+       if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470)
+               return MALI_FALSE;
+
        return MALI_TRUE;
-#else
-       return !mali_gpu_class_is_mali450;
-#endif
 }
-
 #endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.c
deleted file mode 100644 (file)
index a3f7026..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#include "mali_kernel_common.h"
-#include "mali_kernel_descriptor_mapping.h"
-#include "mali_osk.h"
-#include "mali_osk_bitops.h"
-
-#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
-
-/**
- * Allocate a descriptor table capable of holding 'count' mappings
- * @param count Number of mappings in the table
- * @return Pointer to a new table, NULL on error
- */
-static mali_descriptor_table * descriptor_table_alloc(int count);
-
-/**
- * Free a descriptor table
- * @param table The table to free
- */
-static void descriptor_table_free(mali_descriptor_table * table);
-
-mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
-{
-       mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
-
-       init_entries = MALI_PAD_INT(init_entries);
-       max_entries = MALI_PAD_INT(max_entries);
-
-       if (NULL != map) {
-               map->table = descriptor_table_alloc(init_entries);
-               if (NULL != map->table) {
-                       map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
-                       if (NULL != map->lock) {
-                               _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
-                               map->max_nr_mappings_allowed = max_entries;
-                               map->current_nr_mappings = init_entries;
-                               return map;
-                       }
-                       descriptor_table_free(map->table);
-               }
-               _mali_osk_free(map);
-       }
-       return NULL;
-}
-
-void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
-{
-       descriptor_table_free(map->table);
-       _mali_osk_mutex_rw_term(map->lock);
-       _mali_osk_free(map);
-}
-
-_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
-{
-       _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
-       int new_descriptor;
-
-       MALI_DEBUG_ASSERT_POINTER(map);
-       MALI_DEBUG_ASSERT_POINTER(odescriptor);
-
-       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
-       new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
-       if (new_descriptor == map->current_nr_mappings) {
-               /* no free descriptor, try to expand the table */
-               mali_descriptor_table * new_table, * old_table;
-               if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
-
-               map->current_nr_mappings += BITS_PER_LONG;
-               new_table = descriptor_table_alloc(map->current_nr_mappings);
-               if (NULL == new_table) goto unlock_and_exit;
-
-               old_table = map->table;
-               _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
-               _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
-               map->table = new_table;
-               descriptor_table_free(old_table);
-       }
-
-       /* we have found a valid descriptor, set the value and usage bit */
-       _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
-       map->table->mappings[new_descriptor] = target;
-       *odescriptor = new_descriptor;
-       err = _MALI_OSK_ERR_OK;
-
-unlock_and_exit:
-       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
-       MALI_ERROR(err);
-}
-
-void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
-{
-       int i;
-
-       MALI_DEBUG_ASSERT_POINTER(map);
-       MALI_DEBUG_ASSERT_POINTER(callback);
-
-       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
-       /* id 0 is skipped as it's an reserved ID not mapping to anything */
-       for (i = 1; i < map->current_nr_mappings; ++i) {
-               if (_mali_osk_test_bit(i, map->table->usage)) {
-                       callback(i, map->table->mappings[i]);
-               }
-       }
-       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
-}
-
-_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
-{
-       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
-       MALI_DEBUG_ASSERT_POINTER(map);
-       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
-       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
-               *target = map->table->mappings[descriptor];
-               result = _MALI_OSK_ERR_OK;
-       } else *target = NULL;
-       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
-       MALI_ERROR(result);
-}
-
-_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
-{
-       _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
-       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
-       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
-               map->table->mappings[descriptor] = target;
-               result = _MALI_OSK_ERR_OK;
-       }
-       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
-       MALI_ERROR(result);
-}
-
-void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
-{
-       void *old_value = NULL;
-
-       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
-       if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) ) {
-               old_value = map->table->mappings[descriptor];
-               map->table->mappings[descriptor] = NULL;
-               _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
-       }
-       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
-
-       return old_value;
-}
-
-static mali_descriptor_table * descriptor_table_alloc(int count)
-{
-       mali_descriptor_table * table;
-
-       table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
-
-       if (NULL != table) {
-               table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
-               table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
-       }
-
-       return table;
-}
-
-static void descriptor_table_free(mali_descriptor_table * table)
-{
-       _mali_osk_free(table);
-}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_kernel_descriptor_mapping.h
deleted file mode 100644 (file)
index 59115bd..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-/**
- * @file mali_kernel_descriptor_mapping.h
- */
-
-#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
-#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
-
-#include "mali_osk.h"
-
-/**
- * The actual descriptor mapping table, never directly accessed by clients
- */
-typedef struct mali_descriptor_table {
-       u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
-       void** mappings; /**< Array of the pointers the descriptors map to */
-} mali_descriptor_table;
-
-/**
- * The descriptor mapping object
- * Provides a separate namespace where we can map an integer to a pointer
- */
-typedef struct mali_descriptor_mapping {
-       _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
-       int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
-       int current_nr_mappings; /**< Current number of possible mappings */
-       mali_descriptor_table * table; /**< Pointer to the current mapping table */
-} mali_descriptor_mapping;
-
-/**
- * Create a descriptor mapping object
- * Create a descriptor mapping capable of holding init_entries growable to max_entries
- * @param init_entries Number of entries to preallocate memory for
- * @param max_entries Number of entries to max support
- * @return Pointer to a descriptor mapping object, NULL on failure
- */
-mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
-
-/**
- * Destroy a descriptor mapping object
- * @param map The map to free
- */
-void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
-
-/**
- * Allocate a new mapping entry (descriptor ID)
- * Allocates a new entry in the map.
- * @param map The map to allocate a new entry in
- * @param target The value to map to
- * @return The descriptor allocated, a negative value on error
- */
-_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
-
-/**
- * Get the value mapped to by a descriptor ID
- * @param map The map to lookup the descriptor id in
- * @param descriptor The descriptor ID to lookup
- * @param target Pointer to a pointer which will receive the stored value
- * @return 0 on successful lookup, negative on error
- */
-_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
-
-/**
- * Set the value mapped to by a descriptor ID
- * @param map The map to lookup the descriptor id in
- * @param descriptor The descriptor ID to lookup
- * @param target Pointer to replace the current value with
- * @return 0 on successful lookup, negative on error
- */
-_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
-
-/**
- * Call the specified callback function for each descriptor in map.
- * Entire function is mutex protected.
- * @param map The map to do callbacks for
- * @param callback A callback function which will be calle for each entry in map
- */
-void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
-
-/**
- * Free the descriptor ID
- * For the descriptor to be reused it has to be freed
- * @param map The map to free the descriptor from
- * @param descriptor The descriptor ID to free
- *
- * @return old value of descriptor mapping
- */
-void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
-
-#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
index 4c3e85d3cdf909db9f1169169fee5fd6fa9b601e..6b9a210eac99d2ed86356231a97753677000a7a7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_session.h"
 #include "mali_scheduler.h"
 
-static u32 mali_utilization_bw_sum_active = 0;
-static u32 mali_utilization_bw_sum_completed = 0;
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
 
 /* Thresholds for GP bound detection. */
 #define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
 #define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
 
-/* Define how often to calculate and report GPU utilization, in milliseconds */
-static _mali_osk_spinlock_irq_t *time_data_lock;
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
 
-static u32 num_running_gp_cores;
-static u32 num_running_pp_cores;
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
 
 static u64 work_start_time_gpu = 0;
 static u64 work_start_time_gp = 0;
@@ -35,98 +35,19 @@ static u64 accumulated_work_time_gpu = 0;
 static u64 accumulated_work_time_gp = 0;
 static u64 accumulated_work_time_pp = 0;
 
-static u64 period_start_time = 0;
-static _mali_osk_timer_t *utilization_timer = NULL;
-static mali_bool timer_running = MALI_FALSE;
-
 static u32 last_utilization_gpu = 0 ;
 static u32 last_utilization_gp = 0 ;
 static u32 last_utilization_pp = 0 ;
 
-static u32 mali_utilization_timeout = 1000;
 void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
-#define NUMBER_OF_NANOSECONDS_PER_SECOND  1000000000ULL
-
-static u32 calculate_window_render_fps(u64 time_period)
-{
-       u32 max_window_number;
-       u64 tmp;
-       u64 max = time_period;
-       u32 leading_zeroes;
-       u32 shift_val;
-       u32 time_period_shift;
-       u32 max_window_number_shift;
-       u32 ret_val;
-
-       max_window_number = mali_session_max_window_num();
-       /* To avoid float division, extend the dividend to ns unit */
-       tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
-       if (tmp > time_period) {
-               max = tmp;
-       }
-
-       /*
-        * We may have 64-bit values, a dividend or a divisor or both
-        * To avoid dependencies to a 64-bit divider, we shift down the two values
-        * equally first.
-        */
-       leading_zeroes = _mali_osk_clz((u32)(max >> 32));
-       shift_val = 32 - leading_zeroes;
 
-       time_period_shift = (u32)(time_period >> shift_val);
-       max_window_number_shift = (u32)(tmp >> shift_val);
+/* Define the first control timer timeout, in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
 
-       ret_val = max_window_number_shift / time_period_shift;
-
-       return ret_val;
-}
-#endif  /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
-
-
-void mali_utilization_bw_report_counters(u32 active, u32 completed)
-{
-       mali_utilization_bw_sum_active    += active;
-       mali_utilization_bw_sum_completed += completed;
-}
-
-static void mali_utilization_bw_reset(void)
-{
-       mali_utilization_bw_sum_active    = 0;
-       mali_utilization_bw_sum_completed = 0;
-}
-
-u32 mali_utilization_bw_get_period(void)
-{
-       u32 ret = 0;
-       u32 completed;
-
-       MALI_DEBUG_PRINT(1, ("Calculating bandwith factor: Active cycles: %u Instructions completed: %u\n",
-                               mali_utilization_bw_sum_active, mali_utilization_bw_sum_completed));
-
-       /* Shift down by 16 bits. This, together with the != 0 check below,
-        * will elimiate samples with a too low number of instructions to be used. */
-       completed = mali_utilization_bw_sum_completed >> 16;
-
-       /* Avoid divide by 0. When there is no data (either because of too few
-        * samples, or that profiling took reconfigured the counters, just
-        * return 0. */
-       if (0 != completed)
-       {
-               ret = (mali_utilization_bw_sum_active) / (mali_utilization_bw_sum_completed >> 16);
-       }
-
-       mali_utilization_bw_reset();
-
-       return ret;
-}
-EXPORT_SYMBOL(mali_utilization_bw_get_period);
-
-static void calculate_gpu_utilization(void* arg)
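+/* Calculate GP/PP/GPU utilization for the period since *start_time.
+ * Returns a pointer to the shared utilization snapshot; *need_add_timer
+ * tells the caller whether the control timer should be re-armed. */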
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer)
 {
        u64 time_now;
-       u64 time_period;
        u32 leading_zeroes;
        u32 shift_val;
        u32 work_normalized_gpu;
@@ -136,39 +57,40 @@ static void calculate_gpu_utilization(void* arg)
        u32 utilization_gpu;
        u32 utilization_gp;
        u32 utilization_pp;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       u32 window_render_fps;
-#endif
 
-       _mali_osk_spinlock_irq_lock(time_data_lock);
+       mali_utilization_data_lock();
+
+       time_now = _mali_osk_time_get_ns();
+
+       *time_period = time_now - *start_time;
 
        if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+               mali_control_timer_pause();
                /*
                 * No work done for this period
                 * - No need to reschedule timer
                 * - Report zero usage
                 */
-               timer_running = MALI_FALSE;
-
                last_utilization_gpu = 0;
                last_utilization_gp = 0;
                last_utilization_pp = 0;
 
-               _mali_osk_spinlock_irq_unlock(time_data_lock);
+               mali_util_data.utilization_gpu = last_utilization_gpu;
+               mali_util_data.utilization_gp = last_utilization_gp;
+               mali_util_data.utilization_pp = last_utilization_pp;
 
-               if (NULL != mali_utilization_callback) {
-                       struct mali_gpu_utilization_data data = { 0, };
-                       mali_utilization_callback(&data);
-               }
+               mali_utilization_data_unlock();
 
-               mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+               *need_add_timer = MALI_FALSE;
 
-               return;
-       }
+               mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
 
-       time_now = _mali_osk_time_get_ns();
+               MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+               MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+               MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
 
-       time_period = time_now - period_start_time;
+               return &mali_util_data;
+       }
 
        /* If we are currently busy, update working period up to now */
        if (work_start_time_gpu != 0) {
@@ -196,12 +118,12 @@ static void calculate_gpu_utilization(void* arg)
         */
 
        /* Shift the 64-bit values down so they fit inside a 32-bit integer */
-       leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+       leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
        shift_val = 32 - leading_zeroes;
        work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
        work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
        work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
-       period_normalized = (u32)(time_period >> shift_val);
+       period_normalized = (u32)(*time_period >> shift_val);
 
        /*
         * Now, we should report the usage in parts of 256
@@ -226,118 +148,77 @@ static void calculate_gpu_utilization(void* arg)
        utilization_gp = work_normalized_gp / period_normalized;
        utilization_pp = work_normalized_pp / period_normalized;
 
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       window_render_fps = calculate_window_render_fps(time_period);
-#endif
-
        last_utilization_gpu = utilization_gpu;
        last_utilization_gp = utilization_gp;
        last_utilization_pp = utilization_pp;
 
        if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
            (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
-               mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+               mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
        } else {
-               mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+               mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
        }
 
        /* starting a new period */
        accumulated_work_time_gpu = 0;
        accumulated_work_time_gp = 0;
        accumulated_work_time_pp = 0;
-       period_start_time = time_now;
 
-       _mali_osk_spinlock_irq_unlock(time_data_lock);
+       *start_time = time_now;
 
-       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+       mali_util_data.utilization_gp = last_utilization_gp;
+       mali_util_data.utilization_gpu = last_utilization_gpu;
+       mali_util_data.utilization_pp = last_utilization_pp;
 
-       if (NULL != mali_utilization_callback) {
-               struct mali_gpu_utilization_data data = {
-                       utilization_gpu, utilization_gp, utilization_pp,
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-                       window_render_fps, window_render_fps
-#endif
-               };
-               mali_utilization_callback(&data);
-       }
+       mali_utilization_data_unlock();
+
+       *need_add_timer = MALI_TRUE;
+
+       MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+       MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+       MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+       return &mali_util_data;
 }
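The clz/shift sequence above exists because 32-bit kernels have no native 64-bit divide: both the accumulated work time and the period are shifted right by the same amount until they fit in 32 bits, and a single 32-bit division is done. A minimal stand-alone sketch of the same idea, assuming the result is wanted in parts of 256 as above (the helper name and the loop in place of _mali_osk_clz() are illustrative):

        #include <stdint.h>

        /* Scale both 64-bit values down equally until they fit in 32
         * bits, then divide once; work is pre-scaled by 256 so the
         * result is a utilization in parts of 256. */
        static uint32_t utilization_in_256(uint64_t work_ns, uint64_t period_ns)
        {
                uint64_t work = work_ns << 8;   /* parts-of-256 scaling */
                uint32_t shift = 0;

                while ((work >> shift) > 0xFFFFFFFFull ||
                       (period_ns >> shift) > 0xFFFFFFFFull) {
                        shift++;
                }

                if (0 == (uint32_t)(period_ns >> shift)) {
                        return 0;       /* empty period: avoid divide by zero */
                }

                return (uint32_t)(work >> shift) / (uint32_t)(period_ns >> shift);
        }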
 
 _mali_osk_errcode_t mali_utilization_init(void)
 {
 #if USING_GPU_UTILIZATION
-       struct _mali_osk_device_data data;
+       _mali_osk_device_data data;
+
        if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
-               /* Use device specific settings (if defined) */
-               if (0 != data.utilization_interval) {
-                       mali_utilization_timeout = data.utilization_interval;
-               }
                if (NULL != data.utilization_callback) {
                        mali_utilization_callback = data.utilization_callback;
-                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
-                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+                       MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
                }
        }
-#endif
-
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       if (mali_utilization_callback == NULL) {
-               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
-               mali_utilization_callback = mali_power_performance_policy_callback;
-       }
-#endif
+#endif /* USING_GPU_UTILIZATION */
 
        if (NULL == mali_utilization_callback) {
-               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+               MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
        }
 
-       time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
-
-       if (NULL == time_data_lock) {
+       utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+       if (NULL == utilization_data_lock) {
                return _MALI_OSK_ERR_FAULT;
        }
 
        num_running_gp_cores = 0;
        num_running_pp_cores = 0;
 
-       utilization_timer = _mali_osk_timer_init();
-       if (NULL == utilization_timer) {
-               _mali_osk_spinlock_irq_term(time_data_lock);
-               return _MALI_OSK_ERR_FAULT;
-       }
-       _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
-
        return _MALI_OSK_ERR_OK;
 }
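For context, _mali_osk_device_data_get() above reads the platform data attached to the Mali platform device. A hedged sketch of how a board file might install the utilization handler this code picks up; the handler name and body are hypothetical, while the field name follows struct mali_gpu_device_data from linux/mali/mali_utgard.h:

        #include <linux/mali/mali_utgard.h>

        /* Hypothetical platform handler: called with utilization in
         * parts of 256 at the end of each control-timer period. */
        static void my_gpu_utilization_handler(
                struct mali_gpu_utilization_data *data)
        {
                /* e.g. feed data->utilization_gpu into a platform governor */
        }

        static struct mali_gpu_device_data mali_gpu_data = {
                .utilization_callback = my_gpu_utilization_handler,
        };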
 
-void mali_utilization_suspend(void)
-{
-       _mali_osk_spinlock_irq_lock(time_data_lock);
-
-       if (timer_running == MALI_TRUE) {
-               timer_running = MALI_FALSE;
-               _mali_osk_spinlock_irq_unlock(time_data_lock);
-               _mali_osk_timer_del(utilization_timer);
-               return;
-       }
-
-       _mali_osk_spinlock_irq_unlock(time_data_lock);
-}
-
 void mali_utilization_term(void)
 {
-       if (NULL != utilization_timer) {
-               _mali_osk_timer_del(utilization_timer);
-               timer_running = MALI_FALSE;
-               _mali_osk_timer_term(utilization_timer);
-               utilization_timer = NULL;
+       if (NULL != utilization_data_lock) {
+               _mali_osk_spinlock_irq_term(utilization_data_lock);
        }
-
-       _mali_osk_spinlock_irq_term(time_data_lock);
 }
 
 void mali_utilization_gp_start(void)
 {
-       _mali_osk_spinlock_irq_lock(time_data_lock);
+       mali_utilization_data_lock();
 
        ++num_running_gp_cores;
        if (1 == num_running_gp_cores) {
@@ -347,37 +228,55 @@ void mali_utilization_gp_start(void)
                work_start_time_gp = time_now;
 
                if (0 == num_running_pp_cores) {
+                       mali_bool is_resume = MALI_FALSE;
                        /*
                         * There are no PP cores running, so this is also the point
                         * at which we consider the GPU to be busy as well.
                         */
                        work_start_time_gpu = time_now;
-               }
-
-               /* Start a new period (and timer) if needed */
-               if (timer_running != MALI_TRUE) {
-                       timer_running = MALI_TRUE;
-                       period_start_time = time_now;
 
-                       /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-                       mali_session_max_window_num();
+                       is_resume  = mali_control_timer_resume(time_now);
+
+                       mali_utilization_data_unlock();
+
+                       if (is_resume) {
+                               /* Run start-of-period policy work for performance reasons */
+#if defined(CONFIG_MALI_DVFS)
+                               /* Clear session->number_of_window_jobs and prepare the DVFS input */
+                               mali_session_max_window_num();
+                               if (0 == last_utilization_gpu) {
+                                       /*
+                                        * mali_dev_pause is called when the clock is set,
+                                        * so every clock change would jump to the highest
+                                        * step, even when clocking down. That is not
+                                        * necessary, so we only reset the clock when the
+                                        * last utilization was zero, i.e. when the timer
+                                        * was stopped and the GPU is starting again.
+                                        */
+                                       mali_dvfs_policy_new_period();
+                               }
 #endif
-                       _mali_osk_spinlock_irq_unlock(time_data_lock);
-
-                       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+                               /*
+                                * Use a short interval for the first timeout to save
+                                * power: the new period starts at full power, so if the
+                                * job load is light and finishes within ~10 ms, keeping
+                                * the high frequency for the rest of a long period would
+                                * waste energy.
+                                */
+                               mali_control_timer_add(mali_control_first_timeout);
+                       }
                } else {
-                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+                       mali_utilization_data_unlock();
                }
+
        } else {
                /* Nothing to do */
-               _mali_osk_spinlock_irq_unlock(time_data_lock);
+               mali_utilization_data_unlock();
        }
 }
 
 void mali_utilization_pp_start(void)
 {
-       _mali_osk_spinlock_irq_lock(time_data_lock);
+       mali_utilization_data_lock();
 
        ++num_running_pp_cores;
        if (1 == num_running_pp_cores) {
@@ -387,37 +286,55 @@ void mali_utilization_pp_start(void)
                work_start_time_pp = time_now;
 
                if (0 == num_running_gp_cores) {
+                       mali_bool is_resume = MALI_FALSE;
                        /*
                         * There are no GP cores running, so this is also the point
                         * at which we consider the GPU to be busy as well.
                         */
                        work_start_time_gpu = time_now;
-               }
 
-               /* Start a new period (and timer) if needed */
-               if (timer_running != MALI_TRUE) {
-                       timer_running = MALI_TRUE;
-                       period_start_time = time_now;
-
-                       /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-                       mali_session_max_window_num();
+                       /* Start a new period if stopped */
+                       is_resume = mali_control_timer_resume(time_now);
+
+                       mali_utilization_data_unlock();
+
+                       if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+                               /* Clear session->number_of_window_jobs and prepare the DVFS input */
+                               mali_session_max_window_num();
+                               if (0 == last_utilization_gpu) {
+                                       /*
+                                        * mali_dev_pause is called when the clock is set,
+                                        * so every clock change would jump to the highest
+                                        * step, even when clocking down. That is not
+                                        * necessary, so we only reset the clock when the
+                                        * last utilization was zero, i.e. when the timer
+                                        * was stopped and the GPU is starting again.
+                                        */
+                                       mali_dvfs_policy_new_period();
+                               }
 #endif
-                       _mali_osk_spinlock_irq_unlock(time_data_lock);
 
-                       _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+                               /*
+                                * Use a short interval for the first timeout to save
+                                * power: the new period starts at full power, so if the
+                                * job load is light and finishes within ~10 ms, keeping
+                                * the high frequency for the rest of a long period would
+                                * waste energy.
+                                */
+                               mali_control_timer_add(mali_control_first_timeout);
+                       }
                } else {
-                       _mali_osk_spinlock_irq_unlock(time_data_lock);
+                       mali_utilization_data_unlock();
                }
        } else {
                /* Nothing to do */
-               _mali_osk_spinlock_irq_unlock(time_data_lock);
+               mali_utilization_data_unlock();
        }
 }
 
 void mali_utilization_gp_end(void)
 {
-       _mali_osk_spinlock_irq_lock(time_data_lock);
+       mali_utilization_data_lock();
 
        --num_running_gp_cores;
        if (0 == num_running_gp_cores) {
@@ -437,12 +354,12 @@ void mali_utilization_gp_end(void)
                }
        }
 
-       _mali_osk_spinlock_irq_unlock(time_data_lock);
+       mali_utilization_data_unlock();
 }
 
 void mali_utilization_pp_end(void)
 {
-       _mali_osk_spinlock_irq_lock(time_data_lock);
+       mali_utilization_data_lock();
 
        --num_running_pp_cores;
        if (0 == num_running_pp_cores) {
@@ -462,7 +379,49 @@ void mali_utilization_pp_end(void)
                }
        }
 
-       _mali_osk_spinlock_irq_unlock(time_data_lock);
+       mali_utilization_data_unlock();
+}
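The four start/end hooks above implement simple busy-time integration: a timestamp is taken on the idle-to-busy edge, and the elapsed time is folded into an accumulator on the busy-to-idle edge, per GP, PP and whole GPU. A condensed sketch of the pattern (the typedefs stand in for the driver's u32/u64):

        typedef unsigned int       u32;
        typedef unsigned long long u64;

        static u64 busy_since;     /* timestamp of idle -> busy edge */
        static u64 busy_accum_ns;  /* total busy time this period    */
        static u32 active_cores;

        static void core_start(u64 now)
        {
                if (0 == active_cores++) {
                        busy_since = now;                  /* went busy */
                }
        }

        static void core_end(u64 now)
        {
                if (0 == --active_cores) {
                        busy_accum_ns += now - busy_since; /* went idle */
                        busy_since = 0;
                }
        }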
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+       return mali_dvfs_policy_enabled();
+#else
+       return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+       MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+       mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+       accumulated_work_time_gpu = 0;
+       accumulated_work_time_gp = 0;
+       accumulated_work_time_pp = 0;
+
+       last_utilization_gpu = 0;
+       last_utilization_gp = 0;
+       last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(utilization_data_lock);
+}
+
+void mali_utilization_data_assert_locked(void)
+{
+       MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock);
 }
 
 u32 _mali_ukk_utilization_gp_pp(void)
index 73e6a4bfbee5ef8e36d73fa5c5352eb76344b950..c33274cdc3aa0a72789a6903f2a377a590bdd5a5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <linux/mali/mali_utgard.h>
 #include "mali_osk.h"
 
-#define MALI_UTILIZATION_BW_CTR_SRC0 (0)  /* Active cycles */
-#define MALI_UTILIZATION_BW_CTR_SRC1 (37) /* Inctructions completed */
-
-void mali_utilization_bw_report_counters(u32 active, u32 completed);
-
-/*
- */
-u32 mali_utilization_bw_get_period(void);
-
-extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
-
 /**
  * Initialize/start the Mali GPU utilization metrics reporting.
  *
@@ -40,10 +29,7 @@ void mali_utilization_term(void);
 /**
  * Check if Mali utilization is enabled
  */
-MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
-{
-       return (NULL != mali_utilization_callback);
-}
+mali_bool mali_utilization_enabled(void);
 
 /**
  * Should be called when a job is about to execute a GP job
@@ -66,9 +52,21 @@ void mali_utilization_pp_start(void);
 void mali_utilization_pp_end(void);
 
 /**
- * Should be called to stop the utilization timer during system suspend
+ * Should be called to calculate the GPU utilization
  */
-void mali_utilization_suspend(void);
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_data_assert_locked(void);
+
+void mali_utilization_reset(void);
 
 
 #endif /* __MALI_KERNEL_UTILIZATION_H__ */
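The mali_utilization_calculate()/mali_control_timer_add() split moves timer ownership into the new common/mali_control_timer.c added by this commit. A hedged illustration of the intended call pattern only; the real timer callback may differ in detail, and the 1000 ms period is illustrative:

        /* Illustrative control-timer callback driving the API above. */
        static u64 period_start_time;

        static void control_timer_callback(void *arg)
        {
                u64 time_period = 0;
                mali_bool need_add_timer = MALI_TRUE;
                struct mali_gpu_utilization_data *util_data;

                util_data = mali_utilization_calculate(&period_start_time,
                                                       &time_period,
                                                       &need_add_timer);

                if (mali_utilization_enabled()) {
                        mali_utilization_platform_realize(util_data);
                }

                if (MALI_TRUE == need_add_timer) {
                        mali_control_timer_add(1000); /* ms, illustrative */
                }
        }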
index e7dc3dd64dd3b9a95c6595fa1ffc8e247183e143..db3ca893989928928de3a172f50da0caeeb2673b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_osk.h"
 #include "mali_ukk.h"
 
-#if defined(CONFIG_MALI400_PROFILING)
 #include "mali_osk_profiling.h"
-#endif
 
 _mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
 {
        _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
        MALI_IGNORE(event); /* event is not used for release code, and that is OK */
 
-#if defined(CONFIG_MALI400_PROFILING)
        /*
         * Manually generate user space events in kernel space.
         * This saves user space from calling kernel space twice in this case.
         * We just need to remember to add pid and tid manually.
         */
-       if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+       if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
                _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
-                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
        }
 
-       if (event==_MALI_UK_VSYNC_EVENT_END_WAIT) {
-
+       if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
                _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
-                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
-                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
-                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+                                             MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+                                             MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+                                             _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
        }
-#endif
+
 
        MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
        MALI_SUCCESS;
index f7f91f85a79a4c11ff1ce096b315f9a0d55e4317..e4890de43bc37ed89aeb6a1c63469714dbdc66db 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -12,6 +12,7 @@
 #include "mali_l2_cache.h"
 #include "mali_hw_core.h"
 #include "mali_scheduler.h"
+#include "mali_pm.h"
 #include "mali_pm_domain.h"
 
 /**
@@ -28,10 +29,10 @@ typedef enum mali_l2_cache_register {
        MALI400_L2_CACHE_REGISTER_SIZE         = 0x0004,
        MALI400_L2_CACHE_REGISTER_STATUS       = 0x0008,
        /*unused                               = 0x000C */
-       MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010, /**< Misc cache commands, e.g. clear */
+       MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010,
        MALI400_L2_CACHE_REGISTER_CLEAR_PAGE   = 0x0014,
-       MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018, /**< Limit of outstanding read requests */
-       MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C, /**< Enable misc cache features */
+       MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018,
+       MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C,
        MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
        MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
        MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
@@ -43,8 +44,7 @@ typedef enum mali_l2_cache_register {
  * These are the commands that can be sent to the Mali L2 cache unit
  */
 typedef enum mali_l2_cache_command {
-       MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
-       /* Read HW TRM carefully before adding/using other commands than the clear above */
+       MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
 } mali_l2_cache_command;
 
 /**
@@ -52,323 +52,296 @@ typedef enum mali_l2_cache_command {
  * These are the commands that can be sent to the Mali L2 cache unit
  */
 typedef enum mali_l2_cache_enable {
-       MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
-       MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
-       MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+       MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+       MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+       MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
 } mali_l2_cache_enable;
 
 /**
  * Mali L2 cache status bits
  */
 typedef enum mali_l2_cache_status {
-       MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
-       MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02, /**< L2 cache is busy handling data requests */
+       MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+       MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02,
 } mali_l2_cache_status;
 
-#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+#define MALI400_L2_MAX_READS_NOT_SET -1
 
-static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
-static u32 mali_global_num_l2_cache_cores = 0;
+static struct mali_l2_cache_core *
+       mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
 
-int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
 
 
 /* Local helper functions */
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
 
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
 
-static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_lock(cache->counter_lock);
-#else
-       _mali_osk_spinlock_lock(cache->counter_lock);
-#endif
-}
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+       struct mali_l2_cache_core *cache, u32 reg, u32 val);
 
-static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
 {
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_unlock(cache->counter_lock);
-#else
-       _mali_osk_spinlock_unlock(cache->counter_lock);
-#endif
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       _mali_osk_spinlock_irq_lock(cache->lock);
 }
 
-static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
 {
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_lock(cache->command_lock);
-#else
-       _mali_osk_spinlock_lock(cache->command_lock);
-#endif
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       _mali_osk_spinlock_irq_unlock(cache->lock);
 }
 
-static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_unlock(cache->command_lock);
-#else
-       _mali_osk_spinlock_unlock(cache->command_lock);
-#endif
-}
+/* Implementation of the L2 cache interface */
 
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+struct mali_l2_cache_core *mali_l2_cache_create(
+       _mali_osk_resource_t *resource, u32 domain_index)
 {
        struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+       u32 cache_size;
+#endif
 
-       MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+       MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+                            resource->description));
 
-       if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
-               MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+       if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
                return NULL;
        }
 
        cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
-       if (NULL != cache) {
-               cache->core_id =  mali_global_num_l2_cache_cores;
-               cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
-               cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
-               cache->pm_domain = NULL;
-               cache->mali_l2_status = MALI_L2_NORMAL;
-               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
-                       MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
-                       MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
-                                            resource->description,
-                                            1 << (((cache_size >> 16) & 0xff) - 10),
-                                            1 << ((cache_size >> 8) & 0xff),
-                                            1 << (cache_size & 0xff),
-                                            1 << ((cache_size >> 24) & 0xff)));
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
-                       cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
-                       cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
-                       if (NULL != cache->command_lock) {
-#ifdef MALI_UPPER_HALF_SCHEDULING
-                               cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
-                               cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
-                               if (NULL != cache->counter_lock) {
-                                       mali_l2_cache_reset(cache);
-
-                                       cache->last_invalidated_id = 0;
-
-                                       mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
-                                       mali_global_num_l2_cache_cores++;
-
-                                       return cache;
-                               } else {
-                                       MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
-                               }
-#ifdef MALI_UPPER_HALF_SCHEDULING
-                               _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
-                               _mali_osk_spinlock_term(cache->command_lock);
-#endif
-                       } else {
-                               MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
-                       }
+       if (NULL == cache) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+               return NULL;
+       }
 
-                       mali_hw_core_delete(&cache->hw_core);
-               }
+       cache->core_id =  mali_global_num_l2s;
+       cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+       cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+       cache->counter_value0_base = 0;
+       cache->counter_value1_base = 0;
+       cache->pm_domain = NULL;
+       cache->power_is_on = MALI_FALSE;
+       cache->last_invalidated_id = 0;
+
+       if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+                       resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+               _mali_osk_free(cache);
+               return NULL;
+       }
+
+#if defined(DEBUG)
+       cache_size = mali_hw_core_register_read(&cache->hw_core,
+                                               MALI400_L2_CACHE_REGISTER_SIZE);
+       MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+                            resource->description,
+                            1 << (((cache_size >> 16) & 0xff) - 10),
+                            1 << ((cache_size >> 8) & 0xff),
+                            1 << (cache_size & 0xff),
+                            1 << ((cache_size >> 24) & 0xff)));
+#endif
 
+       cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                       _MALI_OSK_LOCK_ORDER_L2);
+       if (NULL == cache->lock) {
+               MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+                                 cache->hw_core.description));
+               mali_hw_core_delete(&cache->hw_core);
                _mali_osk_free(cache);
-       } else {
-               MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+               return NULL;
        }
 
-       return NULL;
+       /* register with correct power domain */
+       cache->pm_domain = mali_pm_register_l2_cache(
+                                  domain_index, cache);
+
+       mali_global_l2s[mali_global_num_l2s] = cache;
+       mali_global_num_l2s++;
+
+       return cache;
 }
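The debug print above unpacks four log2-encoded fields from the L2 SIZE register; a worked decode with a made-up register value:

        /* Hypothetical SIZE register value for a 256 KB, 4-way cache
         * with 64-byte lines on a 64-bit external bus:
         *
         *   cache_size = 0x06120206
         *
         *   (cache_size >> 24) & 0xff = 0x06 -> 1 << 6         = 64-bit bus
         *   (cache_size >> 16) & 0xff = 0x12 -> 1 << (18 - 10) = 256 K
         *   (cache_size >>  8) & 0xff = 0x02 -> 1 << 2         = 4-way
         *    cache_size        & 0xff = 0x06 -> 1 << 6         = 64-byte line
         */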
 
 void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
 {
        u32 i;
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               if (mali_global_l2s[i] != cache) {
+                       continue;
+               }
 
-       /* reset to defaults */
-       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
-       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_term(cache->counter_lock);
-       _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
-       _mali_osk_spinlock_term(cache->command_lock);
-       _mali_osk_spinlock_term(cache->counter_lock);
-#endif
-
-       mali_hw_core_delete(&cache->hw_core);
-
-       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
-               if (mali_global_l2_cache_cores[i] == cache) {
-                       mali_global_l2_cache_cores[i] = NULL;
-                       mali_global_num_l2_cache_cores--;
-
-                       if (i != mali_global_num_l2_cache_cores) {
-                               /* We removed a l2 cache from the middle of the array -- move the last
-                                * l2 cache to the current position to close the gap */
-                               mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
-                               mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
-                       }
+               mali_global_l2s[i] = NULL;
+               mali_global_num_l2s--;
 
+               if (i == mali_global_num_l2s) {
+                       /* Removed last element, nothing more to do */
                        break;
                }
+
+               /*
+                * We removed an L2 cache from the middle of the array,
+                * so move the last L2 cache to the current position
+                */
+               mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+               mali_global_l2s[mali_global_num_l2s] = NULL;
+
+               /* All good */
+               break;
        }
 
+       _mali_osk_spinlock_irq_term(cache->lock);
+       mali_hw_core_delete(&cache->hw_core);
        _mali_osk_free(cache);
 }
 
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
 {
-       return cache->core_id;
+       MALI_DEBUG_ASSERT_POINTER(cache);
+
+       mali_l2_cache_lock(cache);
+
+       mali_l2_cache_reset(cache);
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+       cache->power_is_on = MALI_TRUE;
+
+       mali_l2_cache_unlock(cache);
 }
 
-static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
 {
-       u32 value = 0; /* disabled src */
-       u32 reg_offset = 0;
-       mali_bool core_is_on;
-
        MALI_DEBUG_ASSERT_POINTER(cache);
 
-       core_is_on = mali_l2_cache_lock_power_state(cache);
-
-       mali_l2_cache_counter_lock(cache);
+       mali_l2_cache_lock(cache);
 
-       switch (source_id) {
-       case 0:
-               cache->counter_src0 = counter;
-               reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
-               break;
+       MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
 
-       case 1:
-               cache->counter_src1 = counter;
-               reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
-               break;
+       /*
+        * The HW counters will start from zero again when we resume,
+        * but we should report counters as always increasing.
+        * Take a copy of the HW values now in order to add this to
+        * the values we report after being powered up.
+        *
+        * The physical power off of the L2 cache might be outside our
+        * own control (e.g. runtime PM). That is why we must manually
+        * set the counter value to zero as well.
+        */
 
-       default:
-               MALI_DEBUG_ASSERT(0);
-               break;
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+               cache->counter_value0_base += mali_hw_core_register_read(
+                                                     &cache->hw_core,
+                                                     MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
        }
 
-       if (MALI_L2_PAUSE == cache->mali_l2_status) {
-               mali_l2_cache_counter_unlock(cache);
-               mali_l2_cache_unlock_power_state(cache);
-               return;
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               cache->counter_value1_base += mali_hw_core_register_read(
+                                                     &cache->hw_core,
+                                                     MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
        }
 
-       if (MALI_HW_CORE_NO_COUNTER != counter) {
-               value = counter;
-       }
 
-       if (MALI_TRUE == core_is_on) {
-               mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
-       }
+       cache->power_is_on = MALI_FALSE;
 
-       mali_l2_cache_counter_unlock(cache);
-       mali_l2_cache_unlock_power_state(cache);
+       mali_l2_cache_unlock(cache);
 }
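The counter_valueX_base fields implement the usual trick for keeping performance counters monotonic across power cycles: fold the live HW value into a software base at power-down, and add the base back when reporting. A condensed sketch (names hypothetical; hw_val stands in for a PERFCNT_VALx register read):

        typedef unsigned int u32;

        struct pm_counter {
                u32 base;             /* carried across power cycles */
                int powered;
        };

        static u32 counter_report(const struct pm_counter *c, u32 hw_val)
        {
                /* reported value never goes backwards */
                return (c->powered ? hw_val : 0) + c->base;
        }

        static void counter_power_down(struct pm_counter *c, u32 hw_val)
        {
                c->base += hw_val;    /* HW restarts from zero on resume */
                c->powered = 0;
        }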
 
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+void mali_l2_cache_core_set_counter_src(
+       struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
 {
-       mali_l2_cache_core_set_counter_internal(cache, 0, counter);
-}
+       u32 reg_offset_src;
+       u32 reg_offset_val;
 
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
-{
-       mali_l2_cache_core_set_counter_internal(cache, 1, counter);
-}
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
 
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
-{
-       return cache->counter_src0;
-}
+       mali_l2_cache_lock(cache);
 
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
-{
-       return cache->counter_src1;
+       if (0 == source_id) {
+               /* start counting from 0 */
+               cache->counter_value0_base = 0;
+               cache->counter_src0 = counter;
+               reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+               reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+       } else {
+               /* start counting from 0 */
+               cache->counter_value1_base = 0;
+               cache->counter_src1 = counter;
+               reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+               reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+       }
+
+       if (cache->power_is_on) {
+               u32 hw_src;
+
+               if (MALI_HW_CORE_NO_COUNTER != counter) {
+                       hw_src = counter;
+               } else {
+                       hw_src = 0; /* disable value for HW */
+               }
+
+               /* Set counter src */
+               mali_hw_core_register_write(&cache->hw_core,
+                                           reg_offset_src, hw_src);
+
+               /* Make sure the HW starts counting from 0 again */
+               mali_hw_core_register_write(&cache->hw_core,
+                                           reg_offset_val, 0);
+       }
+
+       mali_l2_cache_unlock(cache);
 }
 
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+void mali_l2_cache_core_get_counter_values(
+       struct mali_l2_cache_core *cache,
+       u32 *src0, u32 *value0, u32 *src1, u32 *value1)
 {
+       MALI_DEBUG_ASSERT_POINTER(cache);
        MALI_DEBUG_ASSERT(NULL != src0);
        MALI_DEBUG_ASSERT(NULL != value0);
        MALI_DEBUG_ASSERT(NULL != src1);
        MALI_DEBUG_ASSERT(NULL != value1);
 
-       /* Caller must hold the PM lock and know that we are powered on */
-
-       mali_l2_cache_counter_lock(cache);
-
-       if (MALI_L2_PAUSE == cache->mali_l2_status) {
-               mali_l2_cache_counter_unlock(cache);
-
-               return;
-       }
+       mali_l2_cache_lock(cache);
 
        *src0 = cache->counter_src0;
        *src1 = cache->counter_src1;
 
        if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
-               *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
-       }
+               if (MALI_TRUE == cache->power_is_on) {
+                       *value0 = mali_hw_core_register_read(&cache->hw_core,
+                                                            MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+               } else {
+                       *value0 = 0;
+               }
 
-       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
-               *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+               /* Add base offset value (in case we have been powered off) */
+               *value0 += cache->counter_value0_base;
        }
 
-       mali_l2_cache_counter_unlock(cache);
-}
-
-static void mali_l2_cache_reset_counters_all(void)
-{
-       int i;
-       u32 value;
-       struct mali_l2_cache_core *cache;
-       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-
-       for (i = 0; i < num_cores; i++) {
-               cache = mali_l2_cache_core_get_glob_l2_core(i);
-               if (MALI_TRUE == mali_l2_cache_lock_power_state(cache)) {
-                       mali_l2_cache_counter_lock(cache);
-
-                       if (MALI_L2_PAUSE == cache->mali_l2_status) {
-                               mali_l2_cache_counter_unlock(cache);
-                               mali_l2_cache_unlock_power_state(cache);
-                               return;
-                       }
-
-                       /* Reset performance counters */
-                       if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
-                               value = 0;
-                       } else {
-                               value = cache->counter_src0;
-                       }
-                       mali_hw_core_register_write(&cache->hw_core,
-                                                   MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
-
-                       if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
-                               value = 0;
-                       } else {
-                               value = cache->counter_src1;
-                       }
-                       mali_hw_core_register_write(&cache->hw_core,
-                                                   MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
-
-                       mali_l2_cache_counter_unlock(cache);
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               if (MALI_TRUE == cache->power_is_on) {
+                       *value1 = mali_hw_core_register_read(&cache->hw_core,
+                                                            MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+               } else {
+                       *value1 = 0;
                }
 
-               mali_l2_cache_unlock_power_state(cache);
+               /* Add base offset value (in case we have been powered off) */
+               *value1 += cache->counter_value1_base;
        }
-}
 
+       mali_l2_cache_unlock(cache);
+}
 
 struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
 {
-       if (mali_global_num_l2_cache_cores > index) {
-               return mali_global_l2_cache_cores[index];
+       if (mali_global_num_l2s > index) {
+               return mali_global_l2s[index];
        }
 
        return NULL;
@@ -376,206 +349,185 @@ struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
 
 u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
 {
-       return mali_global_num_l2_cache_cores;
+       return mali_global_num_l2s;
 }
 
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
 {
-       /* Invalidate cache (just to keep it in a known state at startup) */
-       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
-
-       mali_l2_cache_counter_lock(cache);
-
-       if (MALI_L2_PAUSE == cache->mali_l2_status) {
-               mali_l2_cache_counter_unlock(cache);
+       MALI_DEBUG_ASSERT_POINTER(cache);
 
+       if (NULL == cache) {
                return;
        }
 
-       /* Enable cache */
-       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
-       mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
-
-       /* Restart any performance counters (if enabled) */
-       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
-               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
-       }
+       mali_l2_cache_lock(cache);
 
-       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
-               mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
-       }
+       cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+                                  MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
 
-       mali_l2_cache_counter_unlock(cache);
+       mali_l2_cache_unlock(cache);
 }
 
-void mali_l2_cache_reset_all(void)
-{
-       int i;
-       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-
-       for (i = 0; i < num_cores; i++) {
-               mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
-       }
-}
-
-void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+void mali_l2_cache_invalidate_conditional(
+       struct mali_l2_cache_core *cache, u32 id)
 {
        MALI_DEBUG_ASSERT_POINTER(cache);
 
-       if (NULL != cache) {
-               cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
-               mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+       if (NULL == cache) {
+               return;
        }
-}
 
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
-{
-       MALI_DEBUG_ASSERT_POINTER(cache);
+       /*
+        * If the last cache invalidation was done by a job with a higher id we
+        * don't have to flush. Since user space will store jobs w/ their
+        * corresponding memory in sequence (first job #0, then job #1, ...),
+        * we don't have to flush for job n-1 if job n has already invalidated
+        * the cache since we know for sure that job n-1's memory was already
+        * written when job n was started.
+        */
 
-       if (NULL != cache) {
-               /* If the last cache invalidation was done by a job with a higher id we
-                * don't have to flush. Since user space will store jobs w/ their
-                * corresponding memory in sequence (first job #0, then job #1, ...),
-                * we don't have to flush for job n-1 if job n has already invalidated
-                * the cache since we know for sure that job n-1's memory was already
-                * written when job n was started. */
-               if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
-                       return MALI_FALSE;
-               } else {
-                       cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
-               }
+       mali_l2_cache_lock(cache);
 
-               mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+       if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+               /* Set latest invalidated id to current "point in time" */
+               cache->last_invalidated_id =
+                       mali_scheduler_get_new_cache_order();
+               mali_l2_cache_send_command(cache,
+                                          MALI400_L2_CACHE_REGISTER_COMMAND,
+                                          MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
        }
-       return MALI_TRUE;
+
+       mali_l2_cache_unlock(cache);
 }
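A short illustration of the skip logic above, with hypothetical cache-order ids; once a newer job has invalidated, an older request becomes a no-op:

        mali_l2_cache_invalidate_conditional(cache, 42); /* 42 > last: flush   */
        mali_l2_cache_invalidate_conditional(cache, 41); /* 41 <= last: skipped */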
 
 void mali_l2_cache_invalidate_all(void)
 {
        u32 i;
-       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
-               /*additional check*/
-               if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
-                       _mali_osk_errcode_t ret;
-                       mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
-                       ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
-                       if (_MALI_OSK_ERR_OK != ret) {
-                               MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
-                       }
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               struct mali_l2_cache_core *cache = mali_global_l2s[i];
+               _mali_osk_errcode_t ret;
+
+               MALI_DEBUG_ASSERT_POINTER(cache);
+
+               mali_l2_cache_lock(cache);
+
+               if (MALI_TRUE != cache->power_is_on) {
+                       mali_l2_cache_unlock(cache);
+                       continue;
                }
-               mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+
+               cache->last_invalidated_id =
+                       mali_scheduler_get_new_cache_order();
+
+               ret = mali_l2_cache_send_command(cache,
+                                                MALI400_L2_CACHE_REGISTER_COMMAND,
+                                                MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+               }
+
+               mali_l2_cache_unlock(cache);
        }
 }
 
 void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
 {
        u32 i;
-       for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
-               /*additional check*/
-               if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
-                       u32 j;
-                       for (j = 0; j < num_pages; j++) {
-                               _mali_osk_errcode_t ret;
-                               ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
-                               if (_MALI_OSK_ERR_OK != ret) {
-                                       MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
-                               }
+       for (i = 0; i < mali_global_num_l2s; i++) {
+               struct mali_l2_cache_core *cache = mali_global_l2s[i];
+               u32 j;
+
+               MALI_DEBUG_ASSERT_POINTER(cache);
+
+               mali_l2_cache_lock(cache);
+
+               if (MALI_TRUE != cache->power_is_on) {
+                       mali_l2_cache_unlock(cache);
+                       continue;
+               }
+
+               for (j = 0; j < num_pages; j++) {
+                       _mali_osk_errcode_t ret;
+
+                       ret = mali_l2_cache_send_command(cache,
+                                                        MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+                                                        pages[j]);
+                       if (_MALI_OSK_ERR_OK != ret) {
+                               MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
                        }
                }
-               mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+
+               mali_l2_cache_unlock(cache);
        }
 }
 
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
-{
-       return mali_pm_domain_lock_state(cache->pm_domain);
-}
+/* -------- local helper functions below -------- */
 
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
 {
-       return mali_pm_domain_unlock_state(cache->pm_domain);
-}
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
 
-/* -------- local helper functions below -------- */
+       /* Invalidate cache (just to keep it in a known state at startup) */
+       mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+                                  MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+       /* Enable cache */
+       mali_hw_core_register_write(&cache->hw_core,
+                                   MALI400_L2_CACHE_REGISTER_ENABLE,
+                                   (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+                                   (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+       if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_MAX_READS,
+                                           (u32)mali_l2_max_reads);
+       }
+
+       /* Restart any performance counters (if enabled) */
+       if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+                                           cache->counter_src0);
+       }
 
+       if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+               mali_hw_core_register_write(&cache->hw_core,
+                                           MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+                                           cache->counter_src1);
+       }
+}
 
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+       struct mali_l2_cache_core *cache, u32 reg, u32 val)
 {
        int i = 0;
        const int loop_count = 100000;
 
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
        /*
-        * Grab lock in order to send commands to the L2 cache in a serialized fashion.
-        * The L2 cache will ignore commands if it is busy.
+        * First, wait for L2 cache command handler to go idle.
+        * (Commands received while processing another command will be ignored)
         */
-       mali_l2_cache_command_lock(cache);
-
-       if (MALI_L2_PAUSE == cache->mali_l2_status) {
-               mali_l2_cache_command_unlock(cache);
-               MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for L2 come back\n"));
-
-               MALI_ERROR( _MALI_OSK_ERR_BUSY );
-       }
-
-       /* First, wait for L2 cache command handler to go idle */
-
        for (i = 0; i < loop_count; i++) {
-               if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+               if (!(mali_hw_core_register_read(&cache->hw_core,
+                                                MALI400_L2_CACHE_REGISTER_STATUS) &
+                     (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
                        break;
                }
        }
 
        if (i == loop_count) {
-               mali_l2_cache_command_unlock(cache);
-               MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
-               MALI_ERROR( _MALI_OSK_ERR_FAULT );
+               MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
+               return _MALI_OSK_ERR_FAULT;
        }
 
        /* then issue the command */
        mali_hw_core_register_write(&cache->hw_core, reg, val);
 
-       mali_l2_cache_command_unlock(cache);
-
-       MALI_SUCCESS;
-}
-
-void mali_l2_cache_pause_all(mali_bool pause)
-{
-       int i;
-       struct mali_l2_cache_core * cache;
-       u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-       mali_l2_power_status status = MALI_L2_NORMAL;
-
-       if (pause) {
-               status = MALI_L2_PAUSE;
-       }
-
-       for (i = 0; i < num_cores; i++) {
-               cache = mali_l2_cache_core_get_glob_l2_core(i);
-               if (NULL != cache) {
-                       cache->mali_l2_status = status;
-
-                       /* Take and release the counter and command locks to
-                        * ensure there are no active threads that didn't get
-                        * the status flag update.
-                        *
-                        * The locks will also ensure the necessary memory
-                        * barriers are done on SMP systems.
-                        */
-                       mali_l2_cache_counter_lock(cache);
-                       mali_l2_cache_counter_unlock(cache);
-
-                       mali_l2_cache_command_lock(cache);
-                       mali_l2_cache_command_unlock(cache);
-               }
-       }
-
-       /* Resume from pause: do the cache invalidation here to prevent any
-        * loss of cache operation during the pause period to make sure the SW
-        * status is consistent with L2 cache status.
-        */
-       if(!pause) {
-               mali_l2_cache_invalidate_all();
-               mali_l2_cache_reset_counters_all();
-       }
+       return _MALI_OSK_ERR_OK;
 }
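
With the pause/resume machinery deleted, the command path reduces to a bounded busy-wait for the command interface to go idle, followed by a single register write, all executed with cache->lock already held by the caller. A self-contained sketch of the pattern, where reg_read()/reg_write() are hypothetical stand-ins for the hw_core accessors and the busy-bit value is assumed:

#include <stdint.h>

#define STATUS_COMMAND_BUSY (1u << 0)	/* assumed bit, for illustration */

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);

/* Returns 0 on success, -1 if the command interface never went idle. */
static int send_command(uint32_t status_reg, uint32_t cmd_reg, uint32_t val)
{
	const int loop_count = 100000;	/* same bound as the driver uses */
	int i;

	/* Commands received while another is processed are ignored, so
	 * wait for the command handler to go idle first. */
	for (i = 0; i < loop_count; i++) {
		if (!(reg_read(status_reg) & STATUS_COMMAND_BUSY))
			break;
	}
	if (i == loop_count)
		return -1;

	reg_write(cmd_reg, val);	/* then issue the command */
	return 0;
}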
index 5d979ad36176abb2ee4ae522bc8ed98a082e22c1..085f9214e4981556e57ff8361dadb96b5f612de3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_hw_core.h"
 
 #define MALI_MAX_NUMBER_OF_L2_CACHE_CORES  3
-/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
 #define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
 
-struct mali_group;
-struct mali_pm_domain;
-
-/* Flags describing state of the L2 */
-typedef enum mali_l2_power_status {
-       MALI_L2_NORMAL, /**< L2 is in normal state and operational */
-       MALI_L2_PAUSE,  /**< L2 may not be accessed and may be powered off */
-} mali_l2_power_status;
-
 /**
  * Definition of the L2 cache core struct
  * Used to track a L2 cache unit in the system.
  * Contains information about the mapping of the registers
  */
 struct mali_l2_cache_core {
-       struct mali_hw_core  hw_core;      /**< Common for all HW cores */
-       u32                  core_id;      /**< Unique core ID */
-#ifdef MALI_UPPER_HALF_SCHEDULING
-       _mali_osk_spinlock_irq_t    *command_lock; /**< Serialize all L2 cache commands */
-       _mali_osk_spinlock_irq_t    *counter_lock; /**< Synchronize L2 cache counter access */
-#else
-       _mali_osk_spinlock_t        *command_lock;
-       _mali_osk_spinlock_t        *counter_lock;
-#endif
-       u32                  counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
-       u32                  counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
-       u32                  last_invalidated_id;
+       /* Common HW core functionality */
+       struct mali_hw_core hw_core;
+
+       /* Synchronize L2 cache access */
+       _mali_osk_spinlock_irq_t *lock;
+
+       /* Unique core ID */
+       u32 core_id;
+
+       /* The power domain this L2 cache belongs to */
        struct mali_pm_domain *pm_domain;
-       mali_l2_power_status   mali_l2_status; /**< Indicate whether the L2 is paused or not */
+
+       /* MALI_TRUE if power is on for this L2 cache */
+       mali_bool power_is_on;
+
+       /* A "timestamp" to avoid unnecessary flushes */
+       u32 last_invalidated_id;
+
+       /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32 counter_src0;
+
+       /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+       u32 counter_src1;
+
+       /*
+        * Performance counter 0 value base/offset
+        * (allows accumulative reporting even after power off)
+        */
+       u32 counter_value0_base;
+
+       /*
+        * Performance counter 1 value base/offset
+        * (allows accumulative reporting even after power off)
+        */
+       u32 counter_value1_base;
+
+       /* Used by PM domains to link L2 caches of same domain */
+       _mali_osk_list_t pm_domain_list;
 };
 
 _mali_osk_errcode_t mali_l2_cache_initialize(void);
 void mali_l2_cache_terminate(void);
-/**
- * L2 pause is just a status that the L2 can't be accessed temporarily.
-*/
-void mali_l2_cache_pause_all(mali_bool pause);
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+       _mali_osk_resource_t *resource, u32 domain_index);
 void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
 
-MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
 {
-       cache->pm_domain = domain;
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->core_id;
 }
 
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+       struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+       struct mali_l2_cache_core *cache)
+{
+       MALI_DEBUG_ASSERT_POINTER(cache);
+       return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+       struct mali_l2_cache_core *cache,
+       u32 *src0, u32 *value0, u32 *src1, u32 *value1);
 
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
 struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
 u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
 
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
-void mali_l2_cache_reset_all(void);
-
-struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+struct mali_group *mali_l2_cache_get_group(
+       struct mali_l2_cache_core *cache, u32 index);
 
 void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_conditional(
+       struct mali_l2_cache_core *cache, u32 id);
+
 void mali_l2_cache_invalidate_all(void);
 void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
 
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
-
 #endif /* __MALI_KERNEL_L2_CACHE_H__ */
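
The new counter_value0_base/counter_value1_base fields keep counter readings cumulative across power cycles: the live hardware value is folded into the base before the domain powers down and added back on every read. A sketch of that accounting, with hw_counter_read() as a hypothetical stand-in for reading the PERFCNT value register:

#include <stdint.h>

extern uint32_t hw_counter_read(void);	/* valid only while powered */

struct counter {
	uint32_t value_base;	/* accumulated from earlier power cycles */
	int powered;
};

/* Fold the live value into the base before the domain loses power. */
static void counter_power_down(struct counter *c)
{
	c->value_base += hw_counter_read();
	c->powered = 0;
}

/* Reported values stay monotonic across power cycles. */
static uint32_t counter_read(const struct counter *c)
{
	return c->value_base + (c->powered ? hw_counter_read() : 0);
}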
index 159902fcdb770b9f3abbeb9d9b6ea2f37d9309da..271bcdec16957c20707be5929d5137aa9bb2f4f6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -39,7 +39,7 @@ _mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
        mali_mem_validator.phys_base = start;
        mali_mem_validator.size = size;
        MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
-                            mali_mem_validator.phys_base, mali_mem_validator.size));
+                            mali_mem_validator.phys_base, mali_mem_validator.size));
 
        return _MALI_OSK_ERR_OK;
 }
@@ -47,12 +47,12 @@ _mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
 _mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
 {
        if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
-               if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
-                   (0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+               if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+                   (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
                        if ((phys_addr          >= mali_mem_validator.phys_base) &&
                            ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
                            (phys_addr          <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
-                           ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) ) {
+                           ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
                                MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
                                return _MALI_OSK_ERR_OK;
                        }
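
For reference, mali_mem_validation_check() above performs three tests: reject wrap-around (which also rejects a zero size), require a page-aligned start and size, then do an inclusive containment test against the installed range. The same logic as a standalone sketch, assuming 4 KiB CPU pages:

#include <stdint.h>

#define PAGE_MASK_4K 0xFFFFF000u	/* assumes 4 KiB CPU pages */

static int range_is_valid(uint32_t base, uint32_t range_size,
			  uint32_t phys_addr, uint32_t size)
{
	uint32_t last = phys_addr + (size - 1);

	if (phys_addr >= phys_addr + size)	/* overflow or zero size */
		return 0;
	if ((phys_addr & ~PAGE_MASK_4K) || (size & ~PAGE_MASK_4K))
		return 0;			/* must be page aligned */
	return phys_addr >= base && last <= base + (range_size - 1);
}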
index 4a247ae79e3550e2875bc8316de32c3e96e84356..95e2c85a219e535acd0adc382a2ef96aac5a6ef6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 92b088835e5bc8a91a2e5de6bfe4da9c2a2ad421..f0ce01e29c7a768ad44fd85205b5ada9fa0a51fd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -45,15 +45,15 @@ MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *
 
 /* page fault queue flush helper pages
  * note that the mapping pointers are currently unused outside of the initialization functions */
-static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
-static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
-static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
 static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
 
 /* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
-static u32 mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
+static mali_dma_addr mali_empty_page_directory_phys   = MALI_INVALID_PAGE;
 static mali_io_address mali_empty_page_directory_virt = NULL;
 
 
@@ -61,18 +61,18 @@ _mali_osk_errcode_t mali_mmu_initialize(void)
 {
        /* allocate the helper pages */
        mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
-       if(0 == mali_empty_page_directory_phys) {
+       if (0 == mali_empty_page_directory_phys) {
                MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
                mali_empty_page_directory_phys = MALI_INVALID_PAGE;
                return _MALI_OSK_ERR_NOMEM;
        }
 
        if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
-               &mali_page_fault_flush_page_directory_mapping,
-               &mali_page_fault_flush_page_table,
-               &mali_page_fault_flush_page_table_mapping,
-               &mali_page_fault_flush_data_page,
-               &mali_page_fault_flush_data_page_mapping)) {
+                       &mali_page_fault_flush_page_directory_mapping,
+                       &mali_page_fault_flush_page_table,
+                       &mali_page_fault_flush_page_table_mapping,
+                       &mali_page_fault_flush_data_page,
+                       &mali_page_fault_flush_data_page_mapping)) {
                MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
                mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
                mali_empty_page_directory_phys = MALI_INVALID_PAGE;
@@ -93,20 +93,23 @@ void mali_mmu_terminate(void)
        mali_empty_page_directory_virt = NULL;
 
        /* Free the page fault flush pages */
-       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping,
-                                      &mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping,
-                                      &mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
+       mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+                                      &mali_page_fault_flush_page_directory_mapping,
+                                      &mali_page_fault_flush_page_table,
+                                      &mali_page_fault_flush_page_table_mapping,
+                                      &mali_page_fault_flush_data_page,
+                                      &mali_page_fault_flush_data_page_mapping);
 }
 
 struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
 {
-       struct mali_mmu_core* mmu = NULL;
+       struct mali_mmu_core *mmu = NULL;
 
        MALI_DEBUG_ASSERT_POINTER(resource);
 
        MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
 
-       mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+       mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
        if (NULL != mmu) {
                if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
                        if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
@@ -118,12 +121,12 @@ struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mal
                                if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
                                        /* Setup IRQ handlers (which will do IRQ probing if needed) */
                                        mmu->irq = _mali_osk_irq_init(resource->irq,
-                                                                     mali_group_upper_half_mmu,
-                                                                     group,
-                                                                     mali_mmu_probe_trigger,
-                                                                     mali_mmu_probe_ack,
-                                                                     mmu,
-                                                                     resource->description);
+                                                                     mali_group_upper_half_mmu,
+                                                                     group,
+                                                                     mali_mmu_probe_trigger,
+                                                                     mali_mmu_probe_ack,
+                                                                     mmu,
+                                                                     resource->description);
                                        if (NULL != mmu->irq) {
                                                return mmu;
                                        } else {
@@ -181,12 +184,12 @@ static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
        int i;
        u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
 
-       if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) ) {
+       if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
                MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
                return MALI_TRUE;
        }
 
-       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) {
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
                return MALI_FALSE;
        }
@@ -201,7 +204,7 @@ static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
                if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
                        break;
                }
-               if (0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED ))) {
+               if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
                        break;
                }
        }
@@ -210,7 +213,7 @@ static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
                return MALI_FALSE;
        }
 
-       if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) {
+       if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
                return MALI_FALSE;
        }
@@ -227,7 +230,7 @@ static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
        int i;
        u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
 
-       if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) {
+       if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
                MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
                return;
        }
@@ -240,17 +243,17 @@ static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
 
        for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
                u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
-               if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) ) {
+               if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
                        break;
                }
-               if ( status &  MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) {
+               if (status &  MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
                        break;
                }
-               if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) {
+               if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
                        break;
                }
        }
-       if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+       if (MALI_REG_POLL_COUNT_FAST == i) MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
 }
 
 void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
@@ -334,8 +337,8 @@ void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
 static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
 {
        /* The MMU must be in stalled or page fault mode, for this writing to work */
-       MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
-                                 & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+       MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+                               & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
        mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
        mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
 
@@ -355,7 +358,7 @@ void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_pag
        mali_mmu_disable_stall(mmu);
 }
 
-void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
 {
        mali_bool stall_success;
 
@@ -372,7 +375,7 @@ void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
        mali_mmu_disable_stall(mmu);
 }
 
-void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
 {
        mali_bool stall_success;
        MALI_DEBUG_ASSERT_POINTER(mmu);
@@ -381,14 +384,14 @@ void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
        stall_success = mali_mmu_enable_stall(mmu);
        /* This function is expected to fail the stalling, since it might be in PageFault mode when it is called */
        mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
-       if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
+       if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
 }
 
 /* Is called when we want the mmu to give an interrupt */
 static void mali_mmu_probe_trigger(void *data)
 {
        struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
-       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+       mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
 }
 
 /* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
@@ -414,8 +417,8 @@ static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
                MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
        }
 
-       if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
-            (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+       if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+           (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
                return _MALI_OSK_ERR_OK;
        }
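
The probe pair above implements IRQ-line probing: mali_mmu_probe_trigger() raises both fault sources in INT_RAWSTAT, and mali_mmu_probe_ack() accepts the line only if both bits read back set. A minimal sketch with assumed bit values (the real masks come from the MMU register definitions):

#include <stdint.h>

#define INT_PAGE_FAULT     (1u << 0)	/* assumed values */
#define INT_READ_BUS_ERROR (1u << 1)

extern uint32_t int_rawstat_read(void);
extern void int_rawstat_write(uint32_t v);

static void probe_trigger(void)
{
	/* Raise both interrupt sources at once. */
	int_rawstat_write(INT_PAGE_FAULT | INT_READ_BUS_ERROR);
}

static int probe_ack(void)
{
	const uint32_t want = INT_PAGE_FAULT | INT_READ_BUS_ERROR;

	/* The probe succeeds only if both sources fired. */
	return ((int_rawstat_read() & want) == want) ? 0 : -1;
}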
 
index d11748c93fb04dc4c6b1c8dd2f713c1d15a509e1..3f219b286f2705d0ca4e2aac4e5ea474f87d012c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -79,13 +79,22 @@ mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
 void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
 void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
 
-void mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
-void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
-void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
 
 void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
 
-/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+       u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+       if (0 == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       }
+       return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+
 MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
 {
        return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
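
The new mali_mmu_get_interrupt_result() helper above encodes a simple policy: the MMU raises interrupts only for faults, so any pending bit in RAWSTAT is classified as an error and zero means nothing to handle. As a standalone sketch:

#include <stdint.h>

enum interrupt_result { RESULT_NONE, RESULT_ERROR };

extern uint32_t int_rawstat_read(void);

static enum interrupt_result get_interrupt_result(void)
{
	/* No "success" interrupts exist on this block: nonzero == error. */
	return int_rawstat_read() ? RESULT_ERROR : RESULT_NONE;
}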
index 1439d7b21c29acc9bae4a61529f565dd76fc58e2..8ea1df2d3e7e643c6e8c8de27a7b2e0e7fd7c029 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -10,6 +10,7 @@
 
 #include "mali_kernel_common.h"
 #include "mali_osk.h"
+#include "mali_ukk.h"
 #include "mali_uk_types.h"
 #include "mali_mmu_page_directory.h"
 #include "mali_memory.h"
@@ -21,15 +22,15 @@ u32 mali_allocate_empty_page(mali_io_address *virt_addr)
 {
        _mali_osk_errcode_t err;
        mali_io_address mapping;
-       u32 address;
+       mali_dma_addr address;
 
-       if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+       if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
                /* Allocation failed */
                MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
                return 0;
        }
 
-       MALI_DEBUG_ASSERT_POINTER( mapping );
+       MALI_DEBUG_ASSERT_POINTER(mapping);
 
        err = fill_page(mapping, 0);
        if (_MALI_OSK_ERR_OK != err) {
@@ -42,16 +43,17 @@ u32 mali_allocate_empty_page(mali_io_address *virt_addr)
        return address;
 }
 
-void mali_free_empty_page(u32 address, mali_io_address virt_addr)
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
 {
        if (MALI_INVALID_PAGE != address) {
                mali_mmu_release_table_page(address, virt_addr);
        }
 }
 
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-        u32 *page_table, mali_io_address *page_table_mapping,
-        u32 *data_page, mali_io_address *data_page_mapping)
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping)
 {
        _mali_osk_errcode_t err;
 
@@ -75,9 +77,10 @@ _mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_a
        return err;
 }
 
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-                                    u32 *page_table, mali_io_address *page_table_mapping,
-                                    u32 *data_page, mali_io_address *data_page_mapping)
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping)
 {
        if (MALI_INVALID_PAGE != *page_directory) {
                mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
@@ -101,10 +104,10 @@ void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_d
 static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
 {
        int i;
-       MALI_DEBUG_ASSERT_POINTER( mapping );
+       MALI_DEBUG_ASSERT_POINTER(mapping);
 
-       for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++) {
-               _mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+       for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+               _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
        }
        _mali_osk_mem_barrier();
        MALI_SUCCESS;
@@ -116,29 +119,29 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
        const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
        _mali_osk_errcode_t err;
        mali_io_address pde_mapping;
-       u32 pde_phys;
+       mali_dma_addr pde_phys;
        int i;
 
-       if (last_pde < first_pde) {
-               MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
-       }
+       if (last_pde < first_pde)
+               return _MALI_OSK_ERR_INVALID_ARGS;
 
-       for(i = first_pde; i <= last_pde; i++) {
-               if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+       for (i = first_pde; i <= last_pde; i++) {
+               if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
                        /* Page table not present */
                        MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
                        MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
 
                        err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
-                       if(_MALI_OSK_ERR_OK != err) {
+                       if (_MALI_OSK_ERR_OK != err) {
                                MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
                                return err;
                        }
                        pagedir->page_entries_mapped[i] = pde_mapping;
 
                        /* Update PDE, mark as present */
-                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
-                                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+                                                       pde_phys | MALI_MMU_FLAGS_PRESENT);
 
                        MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
                        pagedir->page_entries_usage_count[i] = 1;
@@ -148,7 +151,7 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
        }
        _mali_osk_write_mem_barrier();
 
-       MALI_SUCCESS;
+       return _MALI_OSK_ERR_OK;
 }
 
 MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
@@ -162,6 +165,13 @@ MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_a
        }
 }
 
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                      index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
 {
        const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
@@ -197,10 +207,10 @@ _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir,
                        MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
                        /* last reference removed, no need to zero out each PTE  */
 
-                       page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+                       page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
                        page_virt = pagedir->page_entries_mapped[i];
                        pagedir->page_entries_mapped[i] = NULL;
-                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+                       _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
 
                        mali_mmu_release_table_page(page_phys, page_virt);
                        pd_changed = MALI_TRUE;
@@ -245,17 +255,22 @@ _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir,
 struct mali_page_directory *mali_mmu_pagedir_alloc(void)
 {
        struct mali_page_directory *pagedir;
+       _mali_osk_errcode_t err;
+       mali_dma_addr phys;
 
        pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
-       if(NULL == pagedir) {
+       if (NULL == pagedir) {
                return NULL;
        }
 
-       if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
+       err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+       if (_MALI_OSK_ERR_OK != err) {
                _mali_osk_free(pagedir);
                return NULL;
        }
 
+       pagedir->page_directory = (u32)phys;
+
        /* Zero page directory */
        fill_page(pagedir->page_directory_mapped, 0);
 
@@ -269,8 +284,11 @@ void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
 
        /* Free referenced page tables and zero PDEs. */
        for (i = 0; i < num_page_table_entries; i++) {
-               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
-                       u32 phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+               if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+                               pagedir->page_directory_mapped,
+                               sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+                       mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                            i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
                        _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
                        mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
                }
@@ -284,22 +302,54 @@ void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
 }
 
 
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 permission_bits)
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits)
 {
        u32 end_address = mali_address + size;
+       u32 mali_phys = (u32)phys_address;
 
        /* Map physical pages into MMU page tables */
-       for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE) {
+       for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
                MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
                _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
-                                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
-                                               phys_address | permission_bits);
+                                               MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+                                               mali_phys | permission_bits);
        }
 }
 
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
 {
-       return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+#if defined(DEBUG)
+       u32 pde_index, pte_index;
+       u32 pde, pte;
+
+       pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+       pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+       pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                    pde_index * sizeof(u32));
+
+
+       if (pde & MALI_MMU_FLAGS_PRESENT) {
+               u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+               pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+                                            pte_index * sizeof(u32));
+
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+                                    "\t\tPTE: %08x, page %08x is %s\n",
+                                    fault_addr, pte_addr, pte,
+                                    MALI_MMU_ENTRY_ADDRESS(pte),
+                                    pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+       } else {
+               MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+                                    fault_addr, pde));
+       }
+#else
+       MALI_IGNORE(pagedir);
+       MALI_IGNORE(fault_addr);
+#endif
 }
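
mali_mmu_pagedir_diag() above walks the two-level tables via MALI_MMU_PDE_ENTRY/MALI_MMU_PTE_ENTRY. Assuming the usual 10/10/12 split for 4 KiB pages (1024-entry directory, 1024-entry tables), a fault address decomposes as sketched below; the exact macro definitions live in mali_mmu_page_directory.h:

#include <stdint.h>

#define PDE_ENTRY(addr)   (((addr) >> 22) & 0x3FFu)	/* top 10 bits */
#define PTE_ENTRY(addr)   (((addr) >> 12) & 0x3FFu)	/* middle 10 bits */
#define PAGE_OFFSET(addr) ((addr) & 0xFFFu)		/* low 12 bits */

int main(void)
{
	uint32_t fault_addr = 0x40001234u;

	/* 0x40001234 -> directory entry 0x100, table entry 0x001, offset 0x234 */
	return !(PDE_ENTRY(fault_addr) == 0x100u &&
		 PTE_ENTRY(fault_addr) == 0x001u &&
		 PAGE_OFFSET(fault_addr) == 0x234u);
}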
 
 /* For instrumented */
@@ -313,11 +363,11 @@ struct dump_info {
 static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
 {
        if (NULL != info) {
-               info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+               info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
 
                if (NULL != info->buffer) {
                        /* check that we have enough space */
-                       if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+                       if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
 
                        *info->buffer = where;
                        info->buffer++;
@@ -325,14 +375,14 @@ static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, st
                        *info->buffer = what;
                        info->buffer++;
 
-                       info->buffer_left -= sizeof(u32)*2;
+                       info->buffer_left -= sizeof(u32) * 2;
                }
        }
 
        MALI_SUCCESS;
 }
 
-static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
 {
        if (NULL != info) {
                /* 4096 for the page and 4 bytes for the address */
@@ -358,7 +408,7 @@ static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_add
        MALI_SUCCESS;
 }
 
-static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
 {
        MALI_DEBUG_ASSERT_POINTER(pagedir);
        MALI_DEBUG_ASSERT_POINTER(info);
@@ -367,15 +417,15 @@ static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *paged
                int i;
 
                MALI_CHECK_NO_ERROR(
-                   mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+                       mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
                );
 
                for (i = 0; i < 1024; i++) {
                        if (NULL != pagedir->page_entries_mapped[i]) {
                                MALI_CHECK_NO_ERROR(
-                                   mali_mmu_dump_page(pagedir->page_entries_mapped[i],
-                                                      _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
-                                                              i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+                                       mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+                                                          _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+                                                                          i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
                                );
                        }
                }
@@ -384,24 +434,23 @@ static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *paged
        MALI_SUCCESS;
 }
 
-static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
 {
        MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
-                                    "set the page directory address", info));
+                                    "set the page directory address", info));
        MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info));
        MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
        MALI_SUCCESS;
 }
 
-_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
 {
        struct dump_info info = { 0, 0, 0, NULL };
-       struct mali_session_data * session_data;
+       struct mali_session_data *session_data;
 
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session_data = (struct mali_session_data *)(args->ctx);
 
        MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
        MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
@@ -409,24 +458,23 @@ _mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu
        MALI_SUCCESS;
 }
 
-_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
 {
        struct dump_info info = { 0, 0, 0, NULL };
-       struct mali_session_data * session_data;
+       struct mali_session_data *session_data;
 
        MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-       MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
 
-       session_data = (struct mali_session_data *)(args->ctx);
+       session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+       MALI_DEBUG_ASSERT_POINTER(session_data);
 
        info.buffer_left = args->size;
-       info.buffer = args->buffer;
+       info.buffer = (u32 *)(uintptr_t)args->buffer;
 
-       args->register_writes = info.buffer;
+       args->register_writes = (uintptr_t)info.buffer;
        MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
 
-       args->page_table_dump = info.buffer;
+       args->page_table_dump = (uintptr_t)info.buffer;
        MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
 
        args->register_writes_size = info.register_writes_size;
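
The dump ioctls follow a two-pass pattern: the query call runs the same writereg()/mali_mmu_dump_page() walk with a NULL buffer purely to accumulate sizes, then the caller allocates and repeats the walk for real; the uintptr_t casts keep the pointer fields stable across a 64-bit kernel / 32-bit userspace boundary. A hypothetical caller-side sketch (names are illustrative, not the real UK API):

#include <stdint.h>
#include <stdlib.h>

struct dump_args {
	uint32_t size;		/* filled in by the query pass */
	uint32_t *buffer;	/* carried as uintptr_t across the UK boundary */
};

extern int query_dump_size(struct dump_args *args);	/* hypothetical */
extern int do_dump(struct dump_args *args);		/* hypothetical */

static int dump_page_tables(void)
{
	struct dump_args args = { 0, NULL };
	int err = query_dump_size(&args);	/* pass 1: sizes only */

	if (err)
		return err;

	args.buffer = malloc(args.size);
	if (args.buffer == NULL)
		return -1;

	err = do_dump(&args);		/* pass 2: same walk, now copying */
	free(args.buffer);
	return err;
}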
index 0db8a76f2ffc2d28318dcc7e3d30d1149a1e038d..765e8ff9d251b417713197330b1fc98d93e92936 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -60,19 +60,19 @@ typedef enum mali_mmu_entry_flags {
 
 
 #define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
-MALI_MMU_FLAGS_PRESENT | \
-       MALI_MMU_FLAGS_READ_PERMISSION |  \
-       MALI_MMU_FLAGS_WRITE_PERMISSION | \
-       MALI_MMU_FLAGS_OVERRIDE_CACHE | \
-       MALI_MMU_FLAGS_WRITE_CACHEABLE | \
-       MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
-       MALI_MMU_FLAGS_READ_CACHEABLE | \
-       MALI_MMU_FLAGS_READ_ALLOCATE )
+               MALI_MMU_FLAGS_PRESENT | \
+               MALI_MMU_FLAGS_READ_PERMISSION |  \
+               MALI_MMU_FLAGS_WRITE_PERMISSION | \
+               MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+               MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+               MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+               MALI_MMU_FLAGS_READ_CACHEABLE | \
+               MALI_MMU_FLAGS_READ_ALLOCATE )
 
 #define MALI_MMU_FLAGS_DEFAULT ( \
-       MALI_MMU_FLAGS_PRESENT | \
-       MALI_MMU_FLAGS_READ_PERMISSION |  \
-       MALI_MMU_FLAGS_WRITE_PERMISSION )
+                                MALI_MMU_FLAGS_PRESENT | \
+                                MALI_MMU_FLAGS_READ_PERMISSION |  \
+                                MALI_MMU_FLAGS_WRITE_PERMISSION )
 
 
 struct mali_page_directory {
@@ -88,20 +88,23 @@ _mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u3
 _mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
 
 /* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
-void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
-
-u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+                            mali_dma_addr phys_address, u32 size, u32 permission_bits);
 
 u32 mali_allocate_empty_page(mali_io_address *virtual);
-void mali_free_empty_page(u32 address, mali_io_address virtual);
-_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-        u32 *page_table, mali_io_address *page_table_mapping,
-        u32 *data_page, mali_io_address *data_page_mapping);
-void mali_destroy_fault_flush_pages(u32 *page_directory, mali_io_address *page_directory_mapping,
-                                    u32 *page_table, mali_io_address *page_table_mapping,
-                                    u32 *data_page, mali_io_address *data_page_mapping);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+               mali_io_address *page_directory_mapping,
+               mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+               mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+       mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+       mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+       mali_dma_addr *data_page, mali_io_address *data_page_mapping);
 
 struct mali_page_directory *mali_mmu_pagedir_alloc(void);
 void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
 
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
 #endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
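
The signature changes in this header all stem from one type switch: table-page physical addresses move from u32 to mali_dma_addr so they survive on kernels where dma_addr_t is wider than 32 bits. A sketch of the idea (config and typedef names assumed, not copied from the driver):

#include <stdint.h>

#ifdef ARCH_DMA_ADDR_T_64BIT		/* assumed config switch */
typedef uint64_t mali_dma_addr;
#else
typedef uint32_t mali_dma_addr;
#endif

/* Mali page-table entries remain 32-bit, so anything handed to the
 * hardware must still fit in 32 bits even when the CPU-side type is
 * wider; hence the explicit casts seen in pagedir_alloc/update. */
static uint32_t to_hw_address(mali_dma_addr phys)
{
	return (uint32_t)phys;
}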
index a8a71d018e9d67d4d2ce3c34d96df6594b41914e..f1ebae7e5d706a265d750a7fabf72e2dcfa25a98 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -78,7 +78,7 @@ extern "C" {
  * to by \a ptr.
  */
 #define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
-             ((type *)( ((char *)ptr) - offsetof(type,member) ))
+       ((type *)( ((char *)ptr) - offsetof(type,member) ))
 
 /** @addtogroup _mali_osk_wq
  * @{ */
@@ -104,7 +104,7 @@ void _mali_osk_wq_term(void);
  * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
  * when no longer needed.
  */
-_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data );
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
 
 /** @brief A high priority version of \a _mali_osk_wq_create_work()
  *
@@ -122,21 +122,21 @@ _mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handl
  *
  * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
  */
-_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri( _mali_osk_wq_work_handler_t handler, void *data );
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
 
 /** @brief Delete a work object
  *
  * This will flush the work queue to ensure that the work handler will not
  * be called after deletion.
  */
-void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work );
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
 
 /** @brief Delete a work object
  *
  * This will NOT flush the work queue, so only call this if you are sure that the work handler will
  * not be called after deletion.
  */
-void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work );
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
 
 /** @brief Cause a queued, deferred call of the work handler
  *
@@ -188,7 +188,7 @@ void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work );
  * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
  * work to begin processing.
  */
-void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work );
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
 
 /** @brief Cause a queued, deferred call of the high priority work handler
  *
@@ -200,7 +200,7 @@ void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work );
  * This is allowed to sleep, but the work should be small since it will block
  * all other applications.
 */
-void _mali_osk_wq_schedule_work_high_pri( _mali_osk_wq_work_t *work );
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
 
 /** @brief Flush the work queue
  *
@@ -295,7 +295,7 @@ void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 d
  * @return on success, a pointer to a _mali_osk_irq_t object, which represents
  * the IRQ handling on this resource. NULL on failure.
  */
-_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description );
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
 
 /** @brief Terminate IRQ handling on a resource.
  *
@@ -309,7 +309,7 @@ _mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandl
  * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
  * resource whose IRQ handling is to be terminated.
  */
-void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
 
 /** @} */ /* end group _mali_osk_irq */
 
@@ -322,25 +322,25 @@ void _mali_osk_irq_term( _mali_osk_irq_t *irq );
  * @note It is an error to decrement the counter beyond -(1<<23)
  *
  * @param atom pointer to an atomic counter */
-void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
 
 /** @brief Decrement an atomic counter, return new value
  *
  * @param atom pointer to an atomic counter
  * @return The new value, after decrement */
-u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
 
 /** @brief Increment an atomic counter
  *
  * @note It is an error to increment the counter beyond (1<<23)-1
  *
  * @param atom pointer to an atomic counter */
-void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
 
 /** @brief Increment an atomic counter, return new value
  *
  * @param atom pointer to an atomic counter */
-u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
 
 /** @brief Initialize an atomic counter
  *
@@ -349,10 +349,8 @@ u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
  *
  * @param atom pointer to an atomic counter
  * @param val the value to initialize the atomic counter.
- * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
- * _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
 
 /** @brief Read a value from an atomic counter
  *
@@ -362,13 +360,13 @@ _mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
  *
  * @param atom pointer to an atomic counter
  */
-u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
 
 /** @brief Terminate an atomic counter
  *
  * @param atom pointer to an atomic counter
  */
-void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
 
 /** @brief Assign a new val to atomic counter, and return the old atomic counter
  *
@@ -376,7 +374,7 @@ void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
  * @param val the new value assign to the atomic counter
  * @return the old value of the atomic counter
  */
-u32 _mali_osk_atomic_xchg( _mali_osk_atomic_t *atom, u32 val );
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
 /** @} */  /* end group _mali_osk_atomic */
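
Note the _mali_osk_atomic_init() change above: initialization cannot fail, so the error-code return is gone. On Linux these wrappers are typically thin veneers over atomic_t, roughly as sketched here (an assumed implementation; the driver's own lives in the OSK layer):

#include <linux/atomic.h>
#include <linux/types.h>

typedef struct { atomic_t u; } osk_atomic_t;

static inline void osk_atomic_init(osk_atomic_t *atom, u32 val)
{
	atomic_set(&atom->u, (int)val);	/* cannot fail, hence void */
}

static inline u32 osk_atomic_inc_return(osk_atomic_t *atom)
{
	return (u32)atomic_inc_return(&atom->u);
}

static inline u32 osk_atomic_xchg(osk_atomic_t *atom, u32 val)
{
	return (u32)atomic_xchg(&atom->u, (int)val);
}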
 
 
@@ -407,7 +405,7 @@ u32 _mali_osk_atomic_xchg( _mali_osk_atomic_t *atom, u32 val );
  * @param size Size of each element
  * @return On success, the zero-initialized buffer allocated. NULL on failure
  */
-void *_mali_osk_calloc( u32 n, u32 size );
+void *_mali_osk_calloc(u32 n, u32 size);
 
 /** @brief Allocate memory.
  *
@@ -433,7 +431,7 @@ void *_mali_osk_calloc( u32 n, u32 size );
  * @param size Number of bytes to allocate
  * @return On success, the buffer allocated. NULL on failure.
  */
-void *_mali_osk_malloc( u32 size );
+void *_mali_osk_malloc(u32 size);
 
 /** @brief Free memory.
  *
@@ -449,7 +447,7 @@ void *_mali_osk_malloc( u32 size );
  *
  * @param ptr Pointer to buffer to free
  */
-void _mali_osk_free( void *ptr );
+void _mali_osk_free(void *ptr);
 
 /** @brief Allocate memory.
  *
@@ -474,7 +472,7 @@ void _mali_osk_free( void *ptr );
  * @param size Number of bytes to allocate
  * @return On success, the buffer allocated. NULL on failure.
  */
-void *_mali_osk_valloc( u32 size );
+void *_mali_osk_valloc(u32 size);
 
 /** @brief Free memory.
  *
@@ -489,7 +487,7 @@ void *_mali_osk_valloc( u32 size );
  *
  * @param ptr Pointer to buffer to free
  */
-void _mali_osk_vfree( void *ptr );
+void _mali_osk_vfree(void *ptr);
 
 /** @brief Copies memory.
  *
@@ -504,7 +502,7 @@ void _mali_osk_vfree( void *ptr );
  * @param len Number of bytes to copy.
  * @return \a dst is always passed through unmodified.
  */
-void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
 
 /** @brief Fills memory.
  *
@@ -516,7 +514,7 @@ void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
  * @param n Number of bytes to be set to the value.
  * @return \a s is always passed through unmodified
  */
-void *_mali_osk_memset( void *s, u32 c, u32 n );
+void *_mali_osk_memset(void *s, u32 c, u32 n);
 /** @} */ /* end group _mali_osk_memory */
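
A short sketch of the allocate/free pairing implied by these declarations; note that _mali_osk_calloc() zero-initializes while _mali_osk_malloc() does not (the entries name is illustrative, and _MALI_OSK_ERR_NOMEM is assumed to be among the usual _MALI_OSK_ERR_* codes):

	u32 *entries = _mali_osk_calloc(16, sizeof(u32));   /* zeroed array of 16 */
	if (NULL == entries) {
		return _MALI_OSK_ERR_NOMEM;
	}
	/* ... use entries[0..15] ... */
	_mali_osk_free(entries);
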
 
 
@@ -533,7 +531,7 @@ void *_mali_osk_memset( void *s, u32 c, u32 n );
  * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
  * when at least \a max_allocated bytes are in use.
  */
-mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
 
 
 /** @addtogroup _mali_osk_low_level_memory
@@ -544,14 +542,14 @@ mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
  * This defines an arbitrary memory barrier operation, which forces an ordering constraint
  * on memory read and write operations.
  */
-void _mali_osk_mem_barrier( void );
+void _mali_osk_mem_barrier(void);
 
 /** @brief Issue a write memory barrier
  *
  * This defines a write memory barrier operation which forces an ordering constraint
  * on memory write operations.
  */
-void _mali_osk_write_mem_barrier( void );
+void _mali_osk_write_mem_barrier(void);
 
 /** @brief Map a physically contiguous region into kernel space
  *
@@ -568,7 +566,7 @@ void _mali_osk_write_mem_barrier( void );
  * @return On success, a Mali IO address through which the mapped-in
  * memory/registers can be accessed. NULL on failure.
  */
-mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
 
 /** @brief Unmap a physically contiguous address range from kernel space.
  *
@@ -590,7 +588,7 @@ mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *descr
  * @param mapping The Mali IO address through which the mapping is
  * accessed.
  */
-void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
 
 /** @brief Allocate and Map a physically contiguous region into kernel space
  *
@@ -617,7 +615,7 @@ void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
  * @return On success, a Mali IO address through which the mapped-in
  * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
  */
-mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
 
 /** @brief Free a physically contiguous address range from kernel space.
  *
@@ -639,7 +637,7 @@ mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
  * @param mapping The Mali IO address through which the mapping is
  * accessed.
  */
-void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
 
 /** @brief Request a region of physically contiguous memory
  *
@@ -659,7 +657,7 @@ void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
  * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
  * _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
 
 /** @brief Un-request a region of physically contiguous memory
  *
@@ -679,7 +677,7 @@ _mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *des
  * @param size the number of bytes of physically contiguous address space to
  * un-request.
  */
-void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
 
 /** @brief Read from a location currently mapped in through
  * _mali_osk_mem_mapioregion
@@ -693,7 +691,7 @@ void _mali_osk_mem_unreqregion( u32 phys, u32 size );
  * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
  * @return the 32-bit word from the specified location.
  */
-u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
 
 /** @brief Write to a location currently mapped in through
  * _mali_osk_mem_mapioregion without memory barriers
@@ -707,7 +705,7 @@ u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
  * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
  * @param val the 32-bit word to write.
  */
-void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val );
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val);
 
 /** @brief Write to a location currently mapped in through
  * _mali_osk_mem_mapioregion with write memory barrier
@@ -721,14 +719,14 @@ void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset,
  * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
  * @param val the 32-bit word to write.
  */
-void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
 
 /** @brief Flush all CPU caches
  *
  * This should only be implemented if flushing of the cache is required for
  * memory mapped in through _mali_osk_mem_mapregion.
  */
-void _mali_osk_cache_flushall( void );
+void _mali_osk_cache_flushall(void);
 
 /** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
  *
@@ -739,7 +737,18 @@ void _mali_osk_cache_flushall( void );
  * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
  * This is required for MALI to have the correct view of the memory.
  */
-void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
 
 /** @} */ /* end group _mali_osk_low_level_memory */
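
The request/map/access/unmap lifecycle documented above chains together as in this sketch (the phys/size values and the "example" description are placeholders):

	uintptr_t phys = 0x13000000;    /* placeholder register base */
	u32 size = 0x1000;
	mali_io_address regs;

	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(phys, size, "example")) {
		return _MALI_OSK_ERR_FAULT;
	}
	regs = _mali_osk_mem_mapioregion(phys, size, "example");
	if (NULL != regs) {
		u32 ver = _mali_osk_mem_ioread32(regs, 0x00);   /* offset multiple of 4 */
		_mali_osk_dbgmsg("version: 0x%08x\n", ver);
		_mali_osk_mem_iowrite32(regs, 0x08, 0x1);       /* write with barrier */
		_mali_osk_mem_unmapioregion(phys, size, regs);
	}
	_mali_osk_mem_unreqregion(phys, size);
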
 
@@ -804,7 +813,7 @@ void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32
  * @param size The size of the type specific buffer to send
  * @return Pointer to a notification object with a suitable buffer, or NULL on error.
  */
-_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
 
 /** @brief Delete a notification object
  *
@@ -824,7 +833,7 @@ _mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
  *
  * @param object the notification object to delete.
  */
-void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
 
 /** @brief Create a notification queue
  *
@@ -840,7 +849,7 @@ void _mali_osk_notification_delete( _mali_osk_notification_t *object );
  *
  * @return Pointer to a new notification queue or NULL on error.
  */
-_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
 
 /** @brief Destroy a notification queue
  *
@@ -870,7 +879,7 @@ _mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
  *
  * @param queue The queue to destroy
  */
-void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
 
 /** @brief Schedule notification for delivery
  *
@@ -891,7 +900,7 @@ void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
  * @param queue The notification queue to add this notification to
  * @param object The entry to add
  */
-void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
 
 /** @brief Receive a notification from a queue
  *
@@ -908,7 +917,7 @@ void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _
  * \ref _mali_osk_notification_t object, or NULL if none were received.
  * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
  */
-_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
 
 /** @brief Dequeues a notification from a queue
  *
@@ -923,7 +932,7 @@ _mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification
  * \ref _mali_osk_notification_t object, or NULL if none were received.
  * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
  */
-_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
 
 /** @} */ /* end group _mali_osk_notification */
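
A sketch of the producer/consumer flow these declarations imply; ownership of a notification moves to the queue on send, and the receiver deletes it (the type value 1 and the u32 payload are placeholders):

	_mali_osk_notification_queue_t *q = _mali_osk_notification_queue_init();
	_mali_osk_notification_t *n;
	_mali_osk_notification_t *out = NULL;

	if (NULL == q) return;
	n = _mali_osk_notification_create(1, sizeof(u32));   /* type 1 is a placeholder */
	if (NULL != n) {
		*(u32 *)n->result_buffer = 42;                /* fill the payload */
		_mali_osk_notification_queue_send(q, n);      /* queue now owns n */
	}
	if (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(q, &out)) {
		_mali_osk_notification_delete(out);           /* receiver frees it */
	}
	_mali_osk_notification_queue_term(q);                 /* queue must be empty */
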
 
@@ -960,7 +969,7 @@ _mali_osk_timer_t *_mali_osk_timer_init(void);
  * @param ticks_to_expire the amount of time in ticks for the timer to run
  * before triggering.
  */
-void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
 
 /** @brief Modify a timer
  *
@@ -979,7 +988,7 @@ void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
  * should trigger.
  *
  */
-void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire);
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
 
 /** @brief Stop a timer, and block on its completion.
  *
@@ -1000,7 +1009,7 @@ void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire);
  * @param tim the timer to stop.
  *
  */
-void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
 
 /** @brief Stop a timer.
  *
@@ -1011,7 +1020,7 @@ void _mali_osk_timer_del( _mali_osk_timer_t *tim );
  *
  * @param tim the timer to stop.
  */
-void _mali_osk_timer_del_async( _mali_osk_timer_t *tim );
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
 
 /** @brief Check if timer is pending.
  *
@@ -1020,7 +1029,7 @@ void _mali_osk_timer_del_async( _mali_osk_timer_t *tim );
  * @param tim the timer to check
  * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not
  */
-mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim);
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
 
 /** @brief Set a timer's callback parameters.
  *
@@ -1034,7 +1043,7 @@ mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim);
  * @param callback Function to call when timer expires
  * @param data Function-specific data to supply to the function on expiry.
  */
-void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
 
 /** @brief Terminate a timer, and deallocate resources.
  *
@@ -1046,7 +1055,7 @@ void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callba
  *
  * @param tim the timer to deallocate.
  */
-void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
 /** @} */ /* end group _mali_osk_timer */
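
The timer lifecycle above composes as follows; per the _mali_osk_timer_callback_t contract documented later in this patch, the callback must not itself call _mali_osk_timer_del() (my_timeout_cb is illustrative):

	static void my_timeout_cb(void *data)        /* illustrative callback */
	{
		_mali_osk_dbgmsg("timer fired: %p\n", data);
	}

	_mali_osk_timer_t *tim = _mali_osk_timer_init();
	if (NULL != tim) {
		_mali_osk_timer_setcallback(tim, my_timeout_cb, NULL);
		_mali_osk_timer_add(tim, _mali_osk_time_mstoticks(100));  /* fire in ~100 ms */
		/* ... */
		_mali_osk_timer_del(tim);            /* stop and wait for completion */
		_mali_osk_timer_term(tim);
	}
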
 
 
@@ -1072,38 +1081,35 @@ void _mali_osk_timer_term( _mali_osk_timer_t *tim );
  *
  * @{ */
 
-/** @brief Return whether ticka occurs after tickb
+/** @brief Return whether ticka occurs after or at the same time as tickb
  *
- * Some OSs handle tick 'rollover' specially, and so can be more robust against
- * tick counters rolling-over. This function must therefore be called to
- * determine if a time (in ticks) really occurs after another time (in ticks).
+ * Tick counters may wrap; this comparison must remain correct across such rollover.
  *
  * @param ticka ticka
  * @param tickb tickb
- * @return non-zero if ticka represents a time that occurs after tickb.
- * Zero otherwise.
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
  */
-int    _mali_osk_time_after( u32 ticka, u32 tickb );
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
 
 /** @brief Convert milliseconds to OS 'ticks'
  *
  * @param ms time interval in milliseconds
  * @return the corresponding time interval in OS ticks.
  */
-u32    _mali_osk_time_mstoticks( u32 ms );
+unsigned long _mali_osk_time_mstoticks(u32 ms);
 
 /** @brief Convert OS 'ticks' to milliseconds
  *
  * @param ticks time interval in OS ticks.
  * @return the corresponding time interval in milliseconds
  */
-u32    _mali_osk_time_tickstoms( u32 ticks );
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
 
 
 /** @brief Get the current time in OS 'ticks'.
  * @return the current time in OS 'ticks'.
  */
-u32    _mali_osk_time_tickcount( void );
+unsigned long _mali_osk_time_tickcount(void);
 
 /** @brief Cause a microsecond delay
  *
@@ -1117,14 +1123,19 @@ u32     _mali_osk_time_tickcount( void );
  *
  * @param usecs the number of microseconds to wait for.
  */
-void _mali_osk_time_ubusydelay( u32 usecs );
+void _mali_osk_time_ubusydelay(u32 usecs);
 
 /** @brief Return time in nanoseconds, since any given reference.
  *
  * @return Time in nanoseconds
  */
-u64 _mali_osk_time_get_ns( void );
+u64 _mali_osk_time_get_ns(void);
 
+/** @brief Return time in nanoseconds, since boot time.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
 
 /** @} */ /* end group _mali_osk_time */
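
These helpers combine into the usual rollover-safe polling pattern. A sketch, inside a function returning _mali_osk_errcode_t (hw_ready() is a placeholder, and _MALI_OSK_ERR_TIMEOUT is assumed to be among the usual _MALI_OSK_ERR_* codes):

	unsigned long deadline = _mali_osk_time_tickcount() + _mali_osk_time_mstoticks(50);

	while (MALI_FALSE == hw_ready()) {                   /* placeholder condition */
		if (_mali_osk_time_after_eq(_mali_osk_time_tickcount(), deadline)) {
			return _MALI_OSK_ERR_TIMEOUT;        /* 50 ms budget exhausted */
		}
		_mali_osk_time_ubusydelay(10);               /* back off 10 us */
	}
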
 
@@ -1140,14 +1151,14 @@ u64 _mali_osk_time_get_ns( void );
  * @param val 32-bit words to count leading zeros on
  * @return the number of leading zeros.
  */
-u32 _mali_osk_clz( u32 val );
+u32 _mali_osk_clz(u32 val);
 
 /** @brief find last (most-significant) bit set
  *
  * @param val 32-bit words to count last bit set on
  * @return last bit set.
  */
-u32 _mali_osk_fls( u32 val );
+u32 _mali_osk_fls(u32 val);
 
 /** @} */ /* end group _mali_osk_math */
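
For concreteness, assuming these wrappers follow the usual conventions (clz counts down from bit 31, fls is 1-based as in Linux):

	_mali_osk_clz(0x00010000);   /* == 15: bits 31..17 are zero */
	_mali_osk_fls(0x00010000);   /* == 17: highest set bit is bit 16, 1-based */
	_mali_osk_clz(0x80000000);   /* ==  0: top bit already set */
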
 
@@ -1155,7 +1166,7 @@ u32 _mali_osk_fls( u32 val );
  * @{ */
 
 /** @brief Initialize an empty Wait Queue */
-_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
 
 /** @brief Sleep if condition is false
  *
@@ -1167,7 +1178,7 @@ _mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
  * being asked to wake up again, the condition will be re-checked and the
  * thread only woken up if the condition is now true.
  */
-void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data );
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
 
 /** @brief Sleep if condition is false
  *
@@ -1181,7 +1192,7 @@ void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (
  * thread only woken up if the condition is now true.  Will return if time
  * exceeds timeout.
  */
-void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data, u32 timeout );
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
 
 /** @brief Wake up all threads in wait queue if their respective conditions are
  * true
@@ -1190,13 +1201,13 @@ void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mal
  *
  * Wake up all threads in wait queue \a queue whose condition is now true.
  */
-void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
 
 /** @brief terminate a wait queue
  *
  * @param queue the queue to terminate.
  */
-void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
 /** @} */ /* end group _mali_osk_wait_queue */
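
The condition-callback contract above means the predicate may be re-evaluated on every wake-up, so it must be cheap and side-effect free. A minimal sketch (power_state and power_is_on are illustrative):

	static mali_bool power_is_on(void *data)
	{
		return *(mali_bool *)data;       /* re-checked on each wake-up */
	}

	mali_bool power_state = MALI_FALSE;
	_mali_osk_wait_queue_t *wq = _mali_osk_wait_queue_init();

	_mali_osk_wait_queue_wait_event(wq, power_is_on, &power_state);  /* sleeps while FALSE */

	/* elsewhere, once power_state is set to MALI_TRUE: */
	_mali_osk_wait_queue_wake_up(wq);
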
 
 
@@ -1211,7 +1222,7 @@ void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
  * @param fmt a _mali_osu_vsnprintf() style format string
  * @param ... a variable-number of parameters suitable for \a fmt
  */
-void _mali_osk_dbgmsg( const char *fmt, ... );
+void _mali_osk_dbgmsg(const char *fmt, ...);
 
 /** @brief Print fmt into buf.
  *
@@ -1224,7 +1235,18 @@ void _mali_osk_dbgmsg( const char *fmt, ... );
  * @param ... a variable-number of parameters suitable for \a fmt
  * @return The number of bytes written to \a buf
  */
-u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Print fmt into print_ctx.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param print_ctx a pointer to the result file buffer
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...);
 
 /** @brief Abnormal process abort.
  *
@@ -1255,60 +1277,47 @@ void _mali_osk_break(void);
  */
 u32 _mali_osk_get_pid(void);
 
+/** @brief Return a name for the calling process.
+ *
+ * @return Name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
 /** @brief Return an identifier for the calling thread.
  *
  * @return Identifier for the calling thread.
  */
 u32 _mali_osk_get_tid(void);
 
-/** @brief Enable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_enable(void);
-
-/** @brief Disable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_disable(void);
-
-_mali_osk_errcode_t _mali_osk_pm_delete_callback_timer(void);
 
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
  *
  * When function returns successfully, Mali is ON.
  *
- * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
- */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
-
-
-/** @brief Release the reference to the power manger system for the Mali device.
- *
- * When reference count reach zero, the cores can be off.
- *
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
  */
-void _mali_osk_pm_dev_ref_dec(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
 
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
  *
- * Will leave the cores powered off if they are already powered off.
+ * Mali might not yet be powered on when this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
  *
  * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
- *
- * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
  */
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
 
-/** @brief Releasing the reference to the power manger system for the Mali device.
+/** @brief Release the reference to the external power manager system for the Mali device.
  *
  * When reference count reach zero, the cores can be off.
  *
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
  */
-void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+void _mali_osk_pm_dev_ref_put(void);
 
-/** @brief Block untill pending PM operations are done
+/** @brief Block until pending PM operations are done
  */
 void _mali_osk_pm_dev_barrier(void);
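
The renamed get/put pair above is used bracket-style around GPU access. A sketch of both paths, using only the functions declared here:

	/* Synchronous: Mali is guaranteed on when the call succeeds. */
	if (_MALI_OSK_ERR_OK == _mali_osk_pm_dev_ref_get_sync()) {
		/* ... access the GPU ... */
		_mali_osk_pm_dev_ref_put();
	}

	/* Asynchronous: power-up may still be in flight afterwards. */
	if (_MALI_OSK_ERR_OK == _mali_osk_pm_dev_ref_get_async()) {
		_mali_osk_pm_dev_barrier();      /* wait for pending PM operations */
		/* ... access the GPU ... */
		_mali_osk_pm_dev_ref_put();
	}
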
 
index f1ddc2ca1907db2bbaeeae4970f63a79ccbef0dd..d4f4d96a79cc8ace3736ca571888a8a6fa00432a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 extern "C" {
 #endif
 
-MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
 {
-       MALI_DEBUG_ASSERT( bit < 32 );
-       MALI_DEBUG_ASSERT( NULL != addr );
+       MALI_DEBUG_ASSERT(bit < 32);
+       MALI_DEBUG_ASSERT(NULL != addr);
 
        (*addr) &= ~(1 << bit);
 }
 
-MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
 {
-       MALI_DEBUG_ASSERT( bit < 32 );
-       MALI_DEBUG_ASSERT( NULL != addr );
+       MALI_DEBUG_ASSERT(bit < 32);
+       MALI_DEBUG_ASSERT(NULL != addr);
 
        (*addr) |= (1 << bit);
 }
 
-MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
 {
-       MALI_DEBUG_ASSERT( bit < 32 );
+       MALI_DEBUG_ASSERT(bit < 32);
        return value & (1 << bit);
 }
 
-MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
 {
        u32 inverted;
        u32 negated;
@@ -55,14 +55,14 @@ MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
         * See ARM System Developers Guide for details of count_trailing_zeros */
 
        /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
-       negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+       negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
        /* negated = xxx...x1000...0 */
 
        isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
        /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
         * Note that the output is zero if value was all 1s */
 
-       leading_zeros = _mali_osk_clz( isolated );
+       leading_zeros = _mali_osk_clz(isolated);
 
        return 31 - leading_zeros;
 }
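
Worked through for value = 0x00000007, whose first zero is bit 3:

	inverted = ~0x00000007             = 0xFFFFFFF8
	negated  = (u32)-0xFFFFFFF8        = 0x00000008   /* two's complement */
	isolated = 0x00000008 & 0xFFFFFFF8 = 0x00000008   /* only bit 3 survives */
	_mali_osk_clz(0x00000008) == 28, and the function returns 31 - 28 = 3.
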
@@ -88,12 +88,12 @@ MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
  * significant bit
  * @param addr starting point for counting.
  */
-MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
 {
        addr += nr >> 5; /* find the correct word */
-       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
 
-       _mali_internal_clear_bit( nr, addr );
+       _mali_internal_clear_bit(nr, addr);
 }
 
 /** @brief Set a bit in a sequence of 32-bit words
@@ -101,12 +101,12 @@ MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
  * significant bit
  * @param addr starting point for counting.
  */
-MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
 {
        addr += nr >> 5; /* find the correct word */
-       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
 
-       _mali_internal_set_bit( nr, addr );
+       _mali_internal_set_bit(nr, addr);
 }
 
 /** @brief Test a bit in a sequence of 32-bit words
@@ -116,12 +116,12 @@ MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
  * @return zero if bit was clear, non-zero if set. Do not rely on the return
  * value being related to the actual word under test.
  */
-MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
 {
        addr += nr >> 5; /* find the correct word */
-       nr = nr & ((1 << 5)-1); /* The bit number within the word */
+       nr = nr & ((1 << 5) - 1); /* The bit number within the word */
 
-       return _mali_internal_test_bit( nr, *addr );
+       return _mali_internal_test_bit(nr, *addr);
 }
 
 /* Return maxbit if not found */
@@ -131,23 +131,23 @@ MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
  * @return the number of the first zero bit found, or maxbit if none were found
  * in the specified range.
  */
-MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
 {
        u32 total;
 
-       for ( total = 0; total < maxbit; total += 32, ++addr ) {
+       for (total = 0; total < maxbit; total += 32, ++addr) {
                int result;
-               result = _mali_internal_find_first_zero_bit( *addr );
+               result = _mali_internal_find_first_zero_bit(*addr);
 
                /* non-negative signifies the bit was found */
-               if ( result >= 0 ) {
+               if (result >= 0) {
                        total += (u32)result;
                        break;
                }
        }
 
        /* Now check if we reached maxbit or above */
-       if ( total >= maxbit ) {
+       if (total >= maxbit) {
                total = maxbit;
        }
 
index c44013f67430424644f1f1e3358bbbb3204e114c..fc04452c56bd42f41033f2bb896073f8a5bd21c6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -68,7 +68,7 @@ MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_li
  *
  * @param list the list element to initialize
  */
-MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
 {
        list->next = list;
        list->prev = list;
@@ -87,7 +87,7 @@ MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
  * @param list the list in which to insert. The new element will be the next
  * entry in this list
  */
-MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
 {
        __mali_osk_list_add(new_entry, list, list->next);
 }
@@ -104,7 +104,7 @@ MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_o
  * @param list the list in which to insert. The new element will be the previous
  * entry in this list
  */
-MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
 {
        __mali_osk_list_add(new_entry, list->prev, list);
 }
@@ -117,7 +117,7 @@ MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _ma
  *
  * @param list the list element to remove.
  */
-MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
 {
        __mali_osk_list_del(list->prev, list->next);
 }
@@ -129,7 +129,7 @@ MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
  *
  * @param list the list element to remove and initialize.
  */
-MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
 {
        __mali_osk_list_del(list->prev, list->next);
        _mali_osk_list_init(list);
@@ -142,7 +142,7 @@ MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
  * @param list the list to check.
  * @return non-zero if the list is empty, and zero otherwise.
  */
-MALI_STATIC_INLINE mali_bool _mali_osk_list_empty( _mali_osk_list_t *list )
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
 {
        return list->next == list;
 }
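
Since these lists are intrusive, the link lives inside the element. A sketch of how the primitives above combine (my_job is illustrative; retrieving the container from a node would use the usual container-of macro from this header family):

	typedef struct my_job {
		_mali_osk_list_t link;       /* embedded list node */
		u32 id;
	} my_job;

	_mali_osk_list_t queue;
	my_job a, b;

	_mali_osk_list_init(&queue);
	_mali_osk_list_init(&a.link);
	_mali_osk_list_init(&b.link);

	_mali_osk_list_addtail(&a.link, &queue);     /* FIFO insert at tail */
	_mali_osk_list_addtail(&b.link, &queue);

	if (!_mali_osk_list_empty(&queue)) {
		_mali_osk_list_delinit(queue.next);  /* pop the head (a) */
	}
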
@@ -158,7 +158,7 @@ MALI_STATIC_INLINE mali_bool _mali_osk_list_empty( _mali_osk_list_t *list )
  * @param list the new list into which the element will be inserted, as the next
  * element in the list.
  */
-MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
 {
        __mali_osk_list_del(move_entry->prev, move_entry->next);
        _mali_osk_list_add(move_entry, list);
@@ -173,7 +173,7 @@ MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali
  * @param old_list The existing list head
  * @param new_list The new list head (must be an empty list)
  */
-MALI_STATIC_INLINE void _mali_osk_list_move_list( _mali_osk_list_t *old_list, _mali_osk_list_t *new_list )
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
 {
        MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
        if (!_mali_osk_list_empty(old_list)) {
index b6e3c62e42aa9d1bce0177aa7e160bdc6a23cfef..1de192c838f599899becce79564b6debe6e4d215 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -28,44 +28,15 @@ extern "C" {
 
 /** @brief Struct with device specific configuration data
  */
-struct _mali_osk_device_data {
-       /* Dedicated GPU memory range (physical). */
-       u32 dedicated_mem_start;
-       u32 dedicated_mem_size;
+typedef struct mali_gpu_device_data _mali_osk_device_data;
 
-       /* Shared GPU memory */
-       u32 shared_mem_size;
-
-       /* Frame buffer memory to be accessible by Mali GPU (physical) */
-       u32 fb_start;
-       u32 fb_size;
-
-       /* Max runtime [ms] for jobs */
-       int max_job_runtime;
-
-       /* Report GPU utilization in this interval (specified in ms) */
-       u32 utilization_interval;
-
-       /* Function that will receive periodic GPU utilization numbers */
-       void (*utilization_callback)(struct mali_gpu_utilization_data *data);
-
-       /*
-        * Mali PMU switch delay.
-        * Only needed if the power gates are connected to the PMU in a high fanout
-        * network. This value is the number of Mali clock cycles it takes to
-        * enable the power gates and turn on the power mesh.
-        * This value will have no effect if a daisy chain implementation is used.
-        */
-       u32 pmu_switch_delay;
-
-       /* Mali Dynamic power domain configuration in sequence from 0-11
-        *  GP  PP0 PP1  PP2  PP3  PP4  PP5  PP6  PP7, L2$0 L2$1 L2$2
-        */
-       u16 pmu_domain_config[12];
-
-       /* Fuction that platform callback for freq tunning, needed when MALI400_POWER_PERFORMANCE_POLICY enabled */
-       int (*set_freq_callback)(unsigned int mhz);
-};
+#ifdef CONFIG_MALI_DT
+/** @brief Initialize the device resources when using device tree
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
 
 /** @brief Find Mali GPU HW resource
  *
@@ -80,36 +51,44 @@ _mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
  *
  * @return 0 if resources are found, otherwise the Mali GPU component with lowest address.
  */
-u32 _mali_osk_resource_base_address(void);
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the specific GPU resource.
+ *
+ * @return value
+ * 0x400 if Mali 400 specific GPU resource identified
+ * 0x450 if Mali 450 specific GPU resource identified
+ * 0x470 if Mali 470 specific GPU resource identified
+ *
+ */
+u32 _mali_osk_identify_gpu_resource(void);
 
 /** @brief Retrieve the Mali GPU specific data
  *
  * @return _MALI_OSK_ERR_OK on success, otherwise failure.
  */
-_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data);
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
 
-/** @brief Determines if Mali GPU has been configured with shared interrupts.
+/** @brief Find the pmu domain config from device data.
  *
- * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ * @param domain_config_array used to store the pmu domain config found in device data.
+ * @param array_size the size of the domain_config_array array.
  */
-mali_bool _mali_osk_shared_interrupts(void);
-
-/** @} */ /* end group _mali_osk_miscellaneous */
-
-/** @addtogroup _mali_osk_low_level_memory
- * @{ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
 
-/** @brief Copy as much data as possible from src to dest, do not crash if src or dest isn't available.
+/** @brief Get Mali PMU switch delay
  *
- * @param dest Destination buffer (limited to user space mapped Mali memory)
- * @param src Source buffer
- * @param size Number of bytes to copy
- * @return Number of bytes actually copied
+ * @return the pmu switch delay if it is configured
  */
-u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+u32 _mali_osk_get_pmu_switch_delay(void);
 
-/** @} */ /* end group _mali_osk_low_level_memory */
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
 
+/** @} */ /* end group _mali_osk_miscellaneous */
 
 #ifdef __cplusplus
 }
index a5136fd52733b14d7c02e23a5308b35f04b8646b..43084e89a94a89215a111e5bf37a846476e0e499 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -35,6 +35,11 @@ _mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
  */
 void _mali_osk_profiling_term(void);
 
+/**
+ * Stop the profile sampling operation.
+ */
+void _mali_osk_profiling_stop_sampling(u32 pid);
+
 /**
  * Start recording profiling data
  *
@@ -44,7 +49,7 @@ void _mali_osk_profiling_term(void);
  * @param limit The desired maximum number of events to record on input, the actual maximum on output.
  * @return _MALI_OSK_ERR_OK on success, otherwise failure.
  */
-_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
 
 /**
  * Add a profiling event
@@ -55,10 +60,8 @@ _mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
  * @param data2 Third data parameter, depending on event_id specified.
  * @param data3 Fourth data parameter, depending on event_id specified.
  * @param data4 Fifth data parameter, depending on event_id specified.
- * @return _MALI_OSK_ERR_OK on success, otherwise failure.
  */
-/* Call Linux tracepoint directly */
-#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
 
 /**
  * Report a hardware counter event.
@@ -77,13 +80,15 @@ _mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
  */
 void _mali_osk_profiling_report_sw_counters(u32 *counters);
 
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
 /**
  * Stop recording profiling data
  *
  * @param count Returns the number of recorded events.
  * @return _MALI_OSK_ERR_OK on success, otherwise failure.
  */
-_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
 
 /**
  * Retrieves the number of events that can be retrieved
@@ -101,7 +106,7 @@ u32 _mali_osk_profiling_get_count(void);
  * @param data The 5 data values for the retrieved event will be stored here.
  * @return _MALI_OSK_ERR_OK on success, otherwise failure.
  */
-_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
 
 /**
  * Clear the recorded buffer.
index 313c49a48a03987a083aa15fa8e5ed606af8e31a..56eb00f56aa3df3d79b04140f4cd7d8f66343474 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -109,7 +109,7 @@ typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
  *
  * @param arg resource-specific data
  */
-typedef void (*_mali_osk_wq_work_handler_t)( void * arg );
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
 
 /* @} */ /* end group _mali_osk_wq */
 
@@ -123,14 +123,14 @@ typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
  *
  * This function is implemented by the common layer to allow probing of a resource's IRQ.
  * @param arg resource-specific data */
-typedef void  (*_mali_osk_irq_trigger_t)( void * arg );
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
 
 /** @brief Optional function to acknowledge an irq from a resource
  *
  * This function is implemented by the common layer to allow probing of a resource's IRQ.
  * @param arg resource-specific data
  * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
-typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
 
 /** @brief IRQ 'upper-half' handler callback.
  *
@@ -154,7 +154,7 @@ typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
  * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
  * _mali_osk_errcode_t otherwise.
  */
-typedef _mali_osk_errcode_t  (*_mali_osk_irq_uhandler_t)( void * arg );
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
 
 
 /** @} */ /* end group _mali_osk_irq */
@@ -210,21 +210,17 @@ typedef enum {
        _MALI_OSK_LOCK_ORDER_MEM_INFO,
        _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
        _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
-       _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
-       _MALI_OSK_LOCK_ORDER_GROUP,
+       _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+       _MALI_OSK_LOCK_ORDER_EXECUTOR,
        _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
        _MALI_OSK_LOCK_ORDER_SCHEDULER,
        _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
-       _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
-       _MALI_OSK_LOCK_ORDER_L2_COMMAND,
-       _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
        _MALI_OSK_LOCK_ORDER_PROFILING,
-       _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+       _MALI_OSK_LOCK_ORDER_L2,
+       _MALI_OSK_LOCK_ORDER_L2_COMMAND,
        _MALI_OSK_LOCK_ORDER_UTILIZATION,
-       _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
        _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
-       _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
-       _MALI_OSK_LOCK_ORDER_PMU,
+       _MALI_OSK_LOCK_ORDER_PM_STATE,
 
        _MALI_OSK_LOCK_ORDER_LAST,
 } _mali_osk_lock_order_t;
@@ -290,7 +286,7 @@ typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
  * Access to this range must be made through the _mali_osk_mem_ioread32() and
  * _mali_osk_mem_iowrite32() functions.
  */
-typedef struct _mali_io_address * mali_io_address;
+typedef struct _mali_io_address *mali_io_address;
 
 /** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
  *
@@ -339,11 +335,11 @@ typedef struct _mali_io_address * mali_io_address;
  */
 
 /** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
-#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT
 /** Mali Page Size, in bytes.               */
-#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
 /** Mali Page Mask, which masks off the offset within a page */
-#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
 /** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
 
 /** @brief flags for mapping a user-accessible memory range
@@ -371,7 +367,7 @@ typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queu
 typedef struct _mali_osk_notification_t_struct {
        u32 notification_type;   /**< The notification type */
        u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
-       void * result_buffer;   /**< Buffer containing any type specific data */
+       void *result_buffer;    /**< Buffer containing any type specific data */
 } _mali_osk_notification_t;
 
 /** @} */ /* end group _mali_osk_notification */
@@ -396,7 +392,7 @@ typedef struct _mali_osk_notification_t_struct {
  * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
  *
  * @param arg Function-specific data */
-typedef void (*_mali_osk_timer_callback_t)(void * arg);
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
 
 /** @brief Private type for Timer Callback Objects */
 typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
@@ -432,8 +428,9 @@ typedef struct _mali_osk_list_s {
  * Platform independent representation of a Mali HW resource
  */
 typedef struct _mali_osk_resource {
-       const char * description;       /**< short description of the resource */
-       u32 base;                       /**< Physical base address of the resource, as seen by Mali resources. */
+       const char *description;        /**< short description of the resource */
+       uintptr_t base;                 /**< Physical base address of the resource, as seen by Mali resources. */
+	const char *irq_name;           /**< Name of the irq belonging to this resource */
        u32 irq;                        /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
 } _mali_osk_resource_t;
 /** @} */ /* end group _mali_osk_miscellaneous */
@@ -448,6 +445,10 @@ typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
 
 /** @} */ /* end group uddapi */
 
+/** @brief Mali print ctx type which uses seq_file
+  */
+typedef struct seq_file _mali_osk_print_ctx;
+
 #ifdef __cplusplus
 }
 #endif
index 58e7b622cd8dfb0ead4f192ac415dbba60f1b34f..305a899a9efef5a982c2edd9e13a31d6494e41df 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_pm.h"
 #include "mali_kernel_common.h"
 #include "mali_osk.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_osk_mali.h"
 #include "mali_scheduler.h"
-#include "mali_kernel_utilization.h"
 #include "mali_group.h"
 #include "mali_pm_domain.h"
 #include "mali_pmu.h"
 
-static mali_bool mali_power_on = MALI_FALSE;
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * domains which marked as powered on (protected by pm_lock_exec)
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       1 << MALI_DOMAIN_INDEX_DUMMY
+};
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store a mask for each possible number of PP cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel is built without PM_RUNTIME, keep the flag always true,
+ * since the GPU will then never be powered off by runtime PM */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
 
 _mali_osk_errcode_t mali_pm_initialize(void)
 {
-       _mali_osk_pm_dev_enable();
+       _mali_osk_errcode_t err;
+       struct mali_pmu_core *pmu;
+
+       pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                       _MALI_OSK_LOCK_ORDER_PM_STATE);
+       if (NULL == pm_lock_state) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                                           _MALI_OSK_LOCK_ORDER_PM_STATE);
+       if (NULL == pm_lock_exec) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+       if (NULL == pm_work) {
+               mali_pm_terminate();
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pmu = mali_pmu_get_global_pmu_core();
+       if (NULL != pmu) {
+               /*
+                * We have a Mali PMU, set the correct domain
+                * configuration (default or custom)
+                */
+
+               u32 registered_cores_mask;
+
+               mali_pm_set_pmu_domain_config();
+
+               registered_cores_mask = mali_pm_get_registered_cores_mask();
+               mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+               MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+       }
+
+       /* Create all power domains needed (at least one dummy domain) */
+       err = mali_pm_create_pm_domains();
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_pm_terminate();
+               return err;
+       }
+
        return _MALI_OSK_ERR_OK;
 }
 
 void mali_pm_terminate(void)
 {
+       if (NULL != pm_work) {
+               _mali_osk_wq_delete_work(pm_work);
+               pm_work = NULL;
+       }
+
        mali_pm_domain_terminate();
-       _mali_osk_pm_dev_disable();
+
+       if (NULL != pm_lock_exec) {
+               _mali_osk_mutex_term(pm_lock_exec);
+               pm_lock_exec = NULL;
+       }
+
+       if (NULL != pm_lock_state) {
+               _mali_osk_spinlock_irq_term(pm_lock_state);
+               pm_lock_state = NULL;
+       }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+               struct mali_l2_cache_core *l2_cache)
+{
+       struct mali_pm_domain *domain;
+
+       domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+       if (NULL == domain) {
+               MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+               domain = mali_pm_domain_get_from_index(
+                                MALI_DOMAIN_INDEX_DUMMY);
+               domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+       } else {
+               MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+       }
+
+       MALI_DEBUG_ASSERT(NULL != domain);
+
+       mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+       return domain; /* return the actual domain this was registered in */
 }
 
-/* Reset GPU after power up */
-static void mali_pm_reset_gpu(void)
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+               struct mali_group *group)
 {
-       /* Reset all L2 caches */
-       mali_l2_cache_reset_all();
+       struct mali_pm_domain *domain;
 
-       /* Reset all groups */
-       mali_scheduler_reset_all_groups();
+       domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+       if (NULL == domain) {
+               MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+               domain = mali_pm_domain_get_from_index(
+                                MALI_DOMAIN_INDEX_DUMMY);
+               domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+       } else {
+               MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+       }
+
+       MALI_DEBUG_ASSERT(NULL != domain);
+
+       mali_pm_domain_add_group(domain, group);
+
+       return domain; /* return the actual domain this was registered in */
 }
 
-void mali_pm_os_suspend(void)
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+                                 struct mali_group **groups,
+                                 u32 num_domains)
 {
-       MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
-       mali_gp_scheduler_suspend();
-       mali_pp_scheduler_suspend();
-       mali_utilization_suspend();
-       mali_group_power_off(MALI_TRUE);
-       mali_power_on = MALI_FALSE;
+       mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+       u32 i;
+
+       mali_pm_state_lock();
+
+       for (i = 0; i < num_domains; i++) {
+               MALI_DEBUG_ASSERT_POINTER(domains[i]);
+               pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+               if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+                       /*
+                        * Tell caller that the corresponding group
+                        * was not already powered on.
+                        */
+                       ret = MALI_FALSE;
+               } else {
+                       /*
+                        * There is a time gap between we power on the domain and
+                        * set the power state of the corresponding groups to be on.
+                        */
+                       if (NULL != groups[i] &&
+                           MALI_FALSE == mali_group_power_is_on(groups[i])) {
+                               ret = MALI_FALSE;
+                       }
+               }
+       }
+
+       MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+       mali_pm_state_unlock();
+
+       return ret;
 }
 
-void mali_pm_os_resume(void)
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+                                 u32 num_domains)
+{
+       u32 mask = 0;
+       mali_bool ret;
+       u32 i;
+
+       mali_pm_state_lock();
+
+       for (i = 0; i < num_domains; i++) {
+               MALI_DEBUG_ASSERT_POINTER(domains[i]);
+               mask |= mali_pm_domain_ref_put(domains[i]);
+       }
+
+       if (0 == mask) {
+               /* return false, all domains should still stay on */
+               ret = MALI_FALSE;
+       } else {
+               /* Assert that we are dealing with a change */
+               MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+               /* Update our desired domain mask */
+               pd_mask_wanted &= ~mask;
+
+               /* return true; one or more domains can now be powered down */
+               ret = MALI_TRUE;
+       }
+
+       MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+       mali_pm_state_unlock();
+
+       return ret;
+}
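
A sketch of how a caller (e.g. the executor) might pair these two functions for a single group/domain; dom and grp are placeholders bound elsewhere:

	struct mali_pm_domain *doms[1] = { dom };    /* placeholder domain */
	struct mali_group *grps[1] = { grp };        /* placeholder group */

	if (MALI_FALSE == mali_pm_get_domain_refs(doms, grps, 1)) {
		/* power-up still pending; caller must wait for the group */
	}
	/* ... run work on the group ... */
	if (MALI_TRUE == mali_pm_put_domain_refs(doms, 1)) {
		mali_pm_update_async();              /* some domain may now power down */
	}
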
+
+void mali_pm_init_begin(void)
 {
        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-       mali_bool do_reset = MALI_FALSE;
 
-       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+       _mali_osk_pm_dev_ref_get_sync();
 
-       if (MALI_TRUE != mali_power_on) {
-               do_reset = MALI_TRUE;
+       /* Ensure all PMU domains are on */
+       if (NULL != pmu) {
+               mali_pmu_power_up_all(pmu);
        }
+}
 
+void mali_pm_init_end(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       /* Ensure all PMU domains are off */
        if (NULL != pmu) {
-               mali_pmu_reset(pmu);
+               mali_pmu_power_down_all(pmu);
+       }
+
+       _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+       mali_pm_exec_lock();
+
+       if (MALI_TRUE == mali_pm_runtime_active) {
+               /*
+                * Only update if GPU is powered on.
+                * Deactivation of the last group will result in both a
+                * deferred runtime PM suspend operation and
+                * deferred execution of this function.
+                * mali_pm_runtime_active will be false if runtime PM
+                * executed first and thus the GPU is now fully powered off.
+                */
+               mali_pm_update_sync_internal();
+       }
+
+       mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+       _mali_osk_wq_schedule_work(pm_work);
+}
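
The asynchronous variant simply queues pm_work; the work item runs
mali_pm_update_work() further down in this file, which calls
mali_pm_update_sync(). A caller that must not block on pm_lock_exec can
therefore hand the domain update off to the work queue.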
+
+void mali_pm_os_suspend(mali_bool os_suspend)
+{
+       int ret;
+
+       MALI_DEBUG_PRINT(2, ("Mali PM: OS suspend\n"));
+
+       /* Suspend execution of all jobs, and go to inactive state */
+       mali_executor_suspend();
+
+       if (os_suspend) {
+               mali_control_timer_suspend(MALI_TRUE);
        }
 
-       mali_power_on = MALI_TRUE;
-       _mali_osk_write_mem_barrier();
+       mali_pm_exec_lock();
+
+       ret = mali_pm_common_suspend();
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+       MALI_IGNORE(ret);
+
+       mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       MALI_DEBUG_PRINT(2, ("Mali PM: OS resume\n"));
+
+       mali_pm_exec_lock();
+
+#if defined(DEBUG)
+       mali_pm_state_lock();
+
+       /* Assert that things are as we left them in os_suspend(). */
+       MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+       MALI_DEBUG_ASSERT(0 == pd_mask_current);
+       MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
 
-       if (do_reset) {
-               mali_pm_reset_gpu();
-               mali_group_power_on();
+       mali_pm_state_unlock();
+#endif
+
+       if (MALI_TRUE == mali_pm_runtime_active) {
+               /* Runtime PM was active, so reset PMU */
+               if (NULL != pmu) {
+                       mali_pmu_reset(pmu);
+                       pmu_mask_current = mali_pmu_get_mask(pmu);
+
+                       MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+               }
+
+               mali_pm_update_sync_internal();
        }
 
-       mali_gp_scheduler_resume();
-       mali_pp_scheduler_resume();
+       mali_pm_exec_unlock();
+
+       /* Start executing jobs again */
+       mali_executor_resume();
 }
 
-void mali_pm_runtime_suspend(void)
+mali_bool mali_pm_runtime_suspend(void)
 {
+       mali_bool ret;
+
        MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
-       mali_group_power_off(MALI_TRUE);
-       mali_power_on = MALI_FALSE;
+
+       mali_pm_exec_lock();
+
+       /*
+        * Put SW state directly into "off" state, and do not bother to power
+        * down each power domain, because entire GPU will be powered off
+        * when we return.
+        * For runtime PM suspend, in contrast to OS suspend, there is a race
+        * between this function and mali_pm_update_sync_internal(); that race
+        * is benign and is handled below.
+        */
+       ret = mali_pm_common_suspend();
+       if (MALI_TRUE == ret) {
+               mali_pm_runtime_active = MALI_FALSE;
+       } else {
+               /*
+                * Process the racing "power up" instead,
+                * since it could otherwise have been lost.
+                */
+               mali_pm_update_sync_internal();
+       }
+
+       mali_pm_exec_unlock();
+
+       return ret;
 }
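
The mali_bool return value lets the platform glue veto the suspend: if a
power-up raced in while the suspend was being processed, the GPU has to stay
on. A hedged sketch of how a Linux runtime-PM callback might consume it (the
callback name and the -EBUSY mapping are assumptions, not part of this patch):

/* Hypothetical platform glue; assumes <linux/device.h> and <linux/errno.h>. */
static int example_runtime_suspend(struct device *dev)
{
        if (MALI_TRUE == mali_pm_runtime_suspend())
                return 0;       /* GPU idle; power may be cut */

        return -EBUSY;          /* a power-up raced in; stay powered */
}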
 
 void mali_pm_runtime_resume(void)
 {
        struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-       mali_bool do_reset = MALI_FALSE;
 
-       MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+       mali_pm_exec_lock();
 
-       if (MALI_TRUE != mali_power_on) {
-               do_reset = MALI_TRUE;
-       }
+       mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+       ++num_pm_runtime_resume;
+
+       mali_pm_state_lock();
+
+       /*
+        * Assert that things are as we left them in runtime_suspend(),
+        * except for pd_mask_wanted which normally will be the reason we
+        * got here (job queued => domains wanted)
+        */
+       MALI_DEBUG_ASSERT(0 == pd_mask_current);
+       MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+       mali_pm_state_unlock();
+#endif
 
        if (NULL != pmu) {
                mali_pmu_reset(pmu);
+               pmu_mask_current = mali_pmu_get_mask(pmu);
+               MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
+       }
+
+       /*
+        * Normally we are resumed because a job has just been queued.
+        * pd_mask_wanted should thus be != 0.
+        * It is, however, possible for others to take a Mali runtime PM ref
+        * without having a job queued.
+        * Either way, we must always call mali_pm_update_sync_internal(),
+        * because this will take care of any potential mismatch between
+        * pmu_mask_current and pd_mask_current.
+        */
+       mali_pm_update_sync_internal();
+
+       mali_pm_exec_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+                             char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tPower domain: id %u\n",
+                               mali_pm_domain_get_id(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tMask: 0x%04x\n",
+                               mali_pm_domain_get_mask(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tUse count: %u\n",
+                               mali_pm_domain_get_use_count(domain));
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tCurrent power state: %s\n",
+                               (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+                               "On" : "Off");
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\t\tWanted power state: %s\n",
+                               (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+                               "On" : "Off");
+
+       return n;
+}
+#endif
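
The dump helper above uses the usual "n +=" snprintf accumulation idiom. Note
that a C99-style snprintf returns the number of characters that would have
been written, so on truncation n can exceed size and size - n underflows; the
driver relies on the buffer being large enough. A standalone illustration of
the idiom in plain C:

#include <stdio.h>

/* Demo of the accumulation idiom from mali_pm_dump_state_domain(),
 * using plain snprintf instead of the _mali_osk_ wrapper. */
static int dump_demo(char *buf, int size)
{
        int n = 0;

        n += snprintf(buf + n, size - n, "\tPower domain: id %u\n", 3u);
        n += snprintf(buf + n, size - n, "\t\tMask: 0x%04x\n", 0x0008u);

        return n;
}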
+
+static void mali_pm_state_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+       _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+       _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+       _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+                                   struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+                                   u32 *num_groups_up,
+                                   struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+                                   u32 *num_l2_up)
+{
+       u32 domain_bit;
+       u32 notify_mask = power_up_mask;
+
+       MALI_DEBUG_ASSERT(0 != power_up_mask);
+       MALI_DEBUG_ASSERT_POINTER(groups_up);
+       MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+       MALI_DEBUG_ASSERT(0 == *num_groups_up);
+       MALI_DEBUG_ASSERT_POINTER(l2_up);
+       MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+       MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+       MALI_DEBUG_PRINT(5,
+                        ("PM update:      Powering up domains: . [%s]\n",
+                         mali_pm_mask_to_string(power_up_mask)));
+
+       pd_mask_current |= power_up_mask;
+
+       domain_bit = _mali_osk_fls(notify_mask);
+       while (0 != domain_bit) {
+               u32 domain_id = domain_bit - 1;
+               struct mali_pm_domain *domain =
+                       mali_pm_domain_get_from_index(
+                               domain_id);
+               struct mali_l2_cache_core *l2_cache;
+               struct mali_l2_cache_core *l2_cache_tmp;
+               struct mali_group *group;
+               struct mali_group *group_tmp;
+
+               /* Mark domain as powered up */
+               mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+               /*
+                * Make a note of the L2 and/or group(s) to notify
+                * (need to release the PM state lock before doing so)
+                */
+
+               _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+                                           l2_cache_tmp,
+                                           mali_pm_domain_get_l2_cache_list(
+                                                   domain),
+                                           struct mali_l2_cache_core,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_l2_up <
+                                         MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+                       l2_up[*num_l2_up] = l2_cache;
+                       (*num_l2_up)++;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group,
+                                           group_tmp,
+                                           mali_pm_domain_get_group_list(domain),
+                                           struct mali_group,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_groups_up <
+                                         MALI_MAX_NUMBER_OF_GROUPS);
+                       groups_up[*num_groups_up] = group;
+
+                       (*num_groups_up)++;
+               }
+
+               /* Remove current bit and find next */
+               notify_mask &= ~(1 << (domain_id));
+               domain_bit = _mali_osk_fls(notify_mask);
+       }
+}
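
This walker (and the power-down twin that follows) iterates set bits from the
most significant end using _mali_osk_fls(), clearing each bit as it is
handled. The same loop shape in portable, runnable C, where fls_demo() is a
stand-in for the OSK helper:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for _mali_osk_fls(): one plus the index of the highest set
 * bit, or 0 when no bit is set. */
static uint32_t fls_demo(uint32_t v)
{
        uint32_t n = 0;

        while (v) {
                n++;
                v >>= 1;
        }
        return n;
}

int main(void)
{
        uint32_t notify_mask = 0x16;    /* domains 1, 2 and 4 */
        uint32_t bit = fls_demo(notify_mask);

        while (0 != bit) {
                uint32_t domain_id = bit - 1;

                printf("handle domain %u\n", (unsigned)domain_id);
                notify_mask &= ~(1u << domain_id);
                bit = fls_demo(notify_mask);
        }
        return 0;
}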
+
+static void mali_pm_domain_power_down(u32 power_down_mask,
+                                     struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+                                     u32 *num_groups_down,
+                                     struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+                                     u32 *num_l2_down)
+{
+       u32 domain_bit;
+       u32 notify_mask = power_down_mask;
+
+       MALI_DEBUG_ASSERT(0 != power_down_mask);
+       MALI_DEBUG_ASSERT_POINTER(groups_down);
+       MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+       MALI_DEBUG_ASSERT(0 == *num_groups_down);
+       MALI_DEBUG_ASSERT_POINTER(l2_down);
+       MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+       MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+       MALI_DEBUG_PRINT(5,
+                        ("PM update:      Powering down domains: [%s]\n",
+                         mali_pm_mask_to_string(power_down_mask)));
+
+       pd_mask_current &= ~power_down_mask;
+
+       domain_bit = _mali_osk_fls(notify_mask);
+       while (0 != domain_bit) {
+               u32 domain_id = domain_bit - 1;
+               struct mali_pm_domain *domain =
+                       mali_pm_domain_get_from_index(domain_id);
+               struct mali_l2_cache_core *l2_cache;
+               struct mali_l2_cache_core *l2_cache_tmp;
+               struct mali_group *group;
+               struct mali_group *group_tmp;
+
+               /* Mark domain as powered down */
+               mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+               /*
+                * Make a note of the L2s and/or groups to notify
+                * (need to release the PM state lock before doing so)
+                */
+
+               _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+                                           l2_cache_tmp,
+                                           mali_pm_domain_get_l2_cache_list(domain),
+                                           struct mali_l2_cache_core,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_l2_down <
+                                         MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+                       l2_down[*num_l2_down] = l2_cache;
+                       (*num_l2_down)++;
+               }
+
+               _MALI_OSK_LIST_FOREACHENTRY(group,
+                                           group_tmp,
+                                           mali_pm_domain_get_group_list(domain),
+                                           struct mali_group,
+                                           pm_domain_list) {
+                       MALI_DEBUG_ASSERT(*num_groups_down <
+                                         MALI_MAX_NUMBER_OF_GROUPS);
+                       groups_down[*num_groups_down] = group;
+                       (*num_groups_down)++;
+               }
+
+               /* Remove current bit and find next */
+               notify_mask &= ~(1 << (domain_id));
+               domain_bit = _mali_osk_fls(notify_mask);
+       }
+}
+
+/*
+ * Execute pending power domain changes
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+       /*
+        * This should only be called in non-atomic context
+        * (normally as deferred work)
+        *
+        * Look at the pending power domain changes, and execute these.
+        * Make sure group and schedulers are notified about changes.
+        */
+
+       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+       u32 power_down_mask;
+       u32 power_up_mask;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+       ++num_pm_updates;
+#endif
+
+       /* Hold PM state lock while we look at (and obey) the wanted state */
+       mali_pm_state_lock();
+
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Wanted domain mask: .. [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_wanted)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update pre:  Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       /* Figure out which cores we need to power on */
+       power_up_mask = pd_mask_wanted &
+                       (pd_mask_wanted ^ pd_mask_current);
+
+       if (0 != power_up_mask) {
+               u32 power_up_mask_pmu;
+               struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_up = 0;
+               struct mali_l2_cache_core *
+                       l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_up = 0;
+               u32 i;
+
+#if defined(DEBUG)
+               ++num_pm_updates_up;
+#endif
+
+               /*
+                * Make sure dummy/global domain is always included when
+                * powering up, since this is controlled by runtime PM,
+                * and device power is on at this stage.
+                */
+               power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+               /* Power up only real PMU domains */
+               power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+               /* But not those that happen to be powered on already */
+               power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+                                    power_up_mask;
+
+               if (0 != power_up_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current |= power_up_mask_pmu;
+                       mali_pmu_power_up(pmu, power_up_mask_pmu);
+               }
+
+               /*
+                * Put the domains themselves in power up state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_up(power_up_mask,
+                                       groups_up, &num_groups_up,
+                                       l2_up, &num_l2_up);
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /* Notify each L2 cache that it has been powered up */
+               for (i = 0; i < num_l2_up; i++) {
+                       mali_l2_cache_power_up(l2_up[i]);
+               }
+
+               /*
+                * Tell execution module about all the groups we have
+                * powered up. Groups will be notified as a result of this.
+                */
+               mali_executor_group_power_up(groups_up, num_groups_up);
+
+               /* Lock state again before checking for power down */
+               mali_pm_state_lock();
+       }
+
+       /* Figure out which cores we need to power off */
+       power_down_mask = pd_mask_current &
+                         (pd_mask_wanted ^ pd_mask_current);
+
+       /*
+        * Never power down the dummy/global domain here. This is to be done
+        * from a suspend request (since this domain is only physically powered
+        * down at that point).
+        */
+       power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+       if (0 != power_down_mask) {
+               u32 power_down_mask_pmu;
+               struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_down = 0;
+               struct mali_l2_cache_core *
+                       l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_down = 0;
+               u32 i;
+
+#if defined(DEBUG)
+               ++num_pm_updates_down;
+#endif
+
+               /*
+                * Put the domains themselves in power down state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_down(power_down_mask,
+                                         groups_down, &num_groups_down,
+                                         l2_down, &num_l2_down);
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /*
+                * Tell execution module about all the groups we will be
+                * powering down. Groups will be notified as a result of this.
+                */
+               if (0 < num_groups_down) {
+                       mali_executor_group_power_down(groups_down, num_groups_down);
+               }
+
+               /* Notify each L2 cache that we will be powering down */
+               for (i = 0; i < num_l2_down; i++) {
+                       mali_l2_cache_power_down(l2_down[i]);
+               }
+
+               /*
+                * Power down only PMU domains which should not stay on.
+                * Some domains might, for instance, currently be incorrectly
+                * powered up if the default domain power state is all-on.
+                */
+               power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+               if (0 != power_down_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current &= ~power_down_mask_pmu;
+                       mali_pmu_power_down(pmu, power_down_mask_pmu);
+
+               }
+       } else {
+               /*
+                * Power down only PMU domains which should not stay on.
+                * Some domains might, for instance, currently be incorrectly
+                * powered up if the default domain power state is all-on.
+                */
+               u32 power_down_mask_pmu;
+
+               /* No need for state lock since we'll only update PMU */
+               mali_pm_state_unlock();
+
+               power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+               if (0 != power_down_mask_pmu) {
+                       MALI_DEBUG_ASSERT(NULL != pmu);
+                       pmu_mask_current &= ~power_down_mask_pmu;
+                       mali_pmu_power_down(pmu, power_down_mask_pmu);
+               }
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+}
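
The two mask computations are plain set algebra: wanted & (wanted ^ current)
is "wanted but not yet on", and current & (wanted ^ current) is "on but no
longer wanted". A tiny runnable example with arbitrary masks:

#include <stdio.h>

/* Worked example of the mask algebra in mali_pm_update_sync_internal(). */
int main(void)
{
        unsigned wanted = 0x6;  /* domains 1 and 2 wanted */
        unsigned cur = 0x3;     /* domains 0 and 1 currently on */
        unsigned up = wanted & (wanted ^ cur);  /* 0x4: power up 2 */
        unsigned down = cur & (wanted ^ cur);   /* 0x1: power down 0 */

        printf("up=0x%x down=0x%x\n", up, down);
        return 0;
}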
+
+static mali_bool mali_pm_common_suspend(void)
+{
+       mali_pm_state_lock();
+
+       if (0 != pd_mask_wanted) {
+               MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+               mali_pm_state_unlock();
+               return MALI_FALSE;
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_wanted)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       if (0 != pd_mask_current) {
+               /*
+                * We still have some domains powered on.
+                * It is for instance very normal that at least the
+                * dummy/global domain is marked as powered on at this point.
+                * (because it is physically powered on until this function
+                * returns)
+                */
+
+               struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+               u32 num_groups_down = 0;
+               struct mali_l2_cache_core *
+                       l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+               u32 num_l2_down = 0;
+               u32 i;
+
+               /*
+                * Put the domains themselves in power down state.
+                * We get the groups and L2s to notify in return.
+                */
+               mali_pm_domain_power_down(pd_mask_current,
+                                         groups_down,
+                                         &num_groups_down,
+                                         l2_down,
+                                         &num_l2_down);
+
+               MALI_DEBUG_ASSERT(0 == pd_mask_current);
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+               /* Need to unlock PM state lock before notifying L2 + groups */
+               mali_pm_state_unlock();
+
+               /*
+                * Tell execution module about all the groups we will be
+                * powering down. Groups will be notified as a result of this.
+                */
+               if (0 < num_groups_down) {
+                       mali_executor_group_power_down(groups_down, num_groups_down);
+               }
+
+               /* Notify each L2 cache that we will be powering down */
+               for (i = 0; i < num_l2_down; i++) {
+                       mali_l2_cache_power_down(l2_down[i]);
+               }
+
+               pmu_mask_current = 0;
+       } else {
+               MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+               MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+               mali_pm_state_unlock();
+       }
+
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask:  [%s]\n",
+                            mali_pm_mask_to_string(pd_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+                            mali_pm_mask_to_string(pmu_mask_current)));
+       MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+                            mali_pm_group_stats_to_string()));
+
+       return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+       MALI_IGNORE(data);
+       mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+       int i;
+
+       /* Create all domains (including dummy domain) */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (0x0 == domain_config[i]) continue;
+
+               if (NULL == mali_pm_domain_create(domain_config[i])) {
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+       MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+       /* GP core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_GP, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+       }
+
+       /* PP0 - PP3 core */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP0, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP1, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP2, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
+               }
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP3, NULL)) {
+               if (mali_is_mali400()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+               } else if (mali_is_mali450()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+               } else if (mali_is_mali470()) {
+                       domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1;
+               }
        }
 
-       mali_power_on = MALI_TRUE;
-       _mali_osk_write_mem_barrier();
+       /* PP4 - PP7 */
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP4, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP5, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP6, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+       }
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                   MALI_OFFSET_PP7, NULL)) {
+               domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+       }
+
+       /* L2gp/L2PP0/L2PP4 */
+       if (mali_is_mali400()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI400_OFFSET_L2_CACHE0, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+               }
+       } else if (mali_is_mali450()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE0, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+               }
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE1, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+               }
+
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI450_OFFSET_L2_CACHE2, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+               }
+       } else if (mali_is_mali470()) {
+               if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+                           MALI470_OFFSET_L2_CACHE1, NULL)) {
+                       domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0;
+               }
+       }
+}
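
Following the branches above for a hypothetical Mali-450 MP4 (GP, PP0-PP3,
L2 caches 0 and 1 present), the detected defaults group the cores into three
PMU domains. This table is derived by hand from the code above and is only
illustrative; u32 and the index macros come from the driver headers:

/* Hand-derived default domain_config for a hypothetical Mali-450 MP4;
 * unset entries stay 0 (core not present). */
static const u32 mali450_mp4_example[MALI_MAX_NUMBER_OF_DOMAINS] = {
        [MALI_DOMAIN_INDEX_GP]  = 0x01 << 0,    /* GP shares bit 0 with L20 */
        [MALI_DOMAIN_INDEX_PP0] = 0x01 << 1,    /* PP0 shares bit 1 with L21 */
        [MALI_DOMAIN_INDEX_PP1] = 0x01 << 2,
        [MALI_DOMAIN_INDEX_PP2] = 0x01 << 2,    /* PP1-PP3 share bit 2 */
        [MALI_DOMAIN_INDEX_PP3] = 0x01 << 2,
        [MALI_DOMAIN_INDEX_L20] = 0x01 << 0,
        [MALI_DOMAIN_INDEX_L21] = 0x01 << 1,
};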
+
+static u32 mali_pm_get_registered_cores_mask(void)
+{
+       int i = 0;
+       u32 mask = 0;
+
+       for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+               mask |= domain_config[i];
+       }
+
+       return mask;
+}
+
+static void mali_pm_set_pmu_domain_config(void)
+{
+       int i = 0;
+
+       _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+               if (0 != domain_config[i]) {
+                       MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n"));
+                       break;
+               }
+       }
+
+       if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+               MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n"));
+               mali_pm_set_default_pm_domain_config();
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+               if (domain_config[i]) {
+                       MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i]));
+               }
+       }
+       /* Can't override dummy domain mask */
+       domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+               1 << MALI_DOMAIN_INDEX_DUMMY;
+}
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask)
+{
+       static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+       int bit;
+       int str_pos = 0;
+
+       /* Must be protected by lock since we use shared string buffer */
+       if (NULL != pm_lock_exec) {
+               MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       }
+
+       for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+               if (mask & (1 << bit)) {
+                       bit_str[str_pos] = 'X';
+               } else {
+                       bit_str[str_pos] = '-';
+               }
+               str_pos++;
+       }
+
+       bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+       return bit_str;
+}
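
For example, with MALI_MAX_NUMBER_OF_DOMAINS == 13, a mask of 0x1003 (the
dummy domain plus domains 1 and 0) renders as "X----------XX", highest domain
first. The formatting loop, reduced to a standalone program:

#include <stdio.h>

#define N_DOMAINS 13    /* mirrors MALI_MAX_NUMBER_OF_DOMAINS */

int main(void)
{
        char s[N_DOMAINS + 1];
        unsigned mask = 0x1003;
        int bit, pos = 0;

        for (bit = N_DOMAINS - 1; bit >= 0; bit--)
                s[pos++] = (mask & (1u << bit)) ? 'X' : '-';
        s[N_DOMAINS] = '\0';

        printf("%s\n", s);      /* prints X----------XX */
        return 0;
}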
+
+const char *mali_pm_group_stats_to_string(void)
+{
+       static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+       u32 num_groups = mali_group_get_glob_num_groups();
+       u32 i;
+
+       /* Must be protected by lock since we use shared string buffer */
+       if (NULL != pm_lock_exec) {
+               MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+       }
+
+       for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+               struct mali_group *group;
+
+               group = mali_group_get_glob_group(i);
 
-       if (do_reset) {
-               mali_pm_reset_gpu();
-               mali_group_power_on();
+               if (MALI_TRUE == mali_group_power_is_on(group)) {
+                       bit_str[i] = 'X';
+               } else {
+                       bit_str[i] = '-';
+               }
        }
+
+       bit_str[i] = '\0';
+
+       return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask
+ * cost is the total power cost of cores which will be powered on given this mask
+ */
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+       u32 i;
+
+       /* loop through all cores */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (!(domain_config[i] & mask)) {
+                       continue;
+               }
+
+               switch (i) {
+               case MALI_DOMAIN_INDEX_GP:
+                       *cost += MALI_GP_COST;
+
+                       break;
+               case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP3:
+                       if (mali_is_mali400()) {
+                               if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+                                   || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                                       == domain_config[MALI_DOMAIN_INDEX_L20])) {
+                                       *num_pp += 1;
+                               }
+                       } else {
+                               if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+                                   || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                                       == domain_config[MALI_DOMAIN_INDEX_L21])) {
+                                       *num_pp += 1;
+                               }
+                       }
+
+                       *cost += MALI_PP_COST;
+                       break;
+               case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+               case MALI_DOMAIN_INDEX_PP7:
+                       MALI_DEBUG_ASSERT(mali_is_mali450());
+
+                       if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+                           || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+                               == domain_config[MALI_DOMAIN_INDEX_L22])) {
+                               *num_pp += 1;
+                       }
+
+                       *cost += MALI_PP_COST;
+                       break;
+               case MALI_DOMAIN_INDEX_L20: /* Fall through */
+               case MALI_DOMAIN_INDEX_L21: /* Fall through */
+               case MALI_DOMAIN_INDEX_L22:
+                       *cost += MALI_L2_COST;
+
+                       break;
+               }
+       }
+}
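
As a worked example, take the Mali-400 defaults above (GP in bit 0, L20 in
bit 1, PP0 in bit 2). For mask 0x6 the function counts PP0 (num_pp = 1,
because L20's bit is also in the mask) and charges MALI_PP_COST +
MALI_L2_COST. For mask 0x4 alone, PP0 is still charged MALI_PP_COST but is
not counted as usable, since its L2 cache would stay off.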
+
+void mali_pm_power_cost_setup(void)
+{
+       /*
+        * Two parallel arrays which store the best domain mask and its cost.
+        * The index is the number of PP cores minus one; e.g. index 0 is the
+        * best single-PP option, which might have mask 0x2 with a cost of 1.
+        * Lower cost is better.
+        */
+       u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+       u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+       /* cores_in_domain stores the total number of PP cores in each PM domain. */
+       u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+       /* max_domain_mask/max_domain_id track the highest PM domain in use. */
+       u32 max_domain_mask = 0;
+       u32 max_domain_id = 0;
+       u32 always_on_pp_cores = 0;
+
+       u32 num_pp, cost, mask;
+       u32 i, j, k;
+
+       /* Initialize statistics */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+               best_mask[i] = 0;
+               best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+       }
+
+       for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+               for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+                       mali_pm_domain_power_cost_result[i][j] = 0;
+               }
+       }
+
+       /* Scan the PP domain configs: find the max mask and count always-on cores. */
+       for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+               if (0 < domain_config[i]) {
+                       /* Get the max domain mask value used to calculate power
+                        * cost; always-on PP cores are not counted here. */
+                       if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+                           && max_domain_mask < domain_config[i]) {
+                               max_domain_mask = domain_config[i];
+                       }
+
+                       if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+                               always_on_pp_cores++;
+                       }
+               }
+       }
+       max_domain_id = _mali_osk_fls(max_domain_mask);
+
+       /*
+        * Try all combinations of power domains and check how many PP cores
+        * they have and their power cost.
+        */
+       for (mask = 0; mask < (1 << max_domain_id); mask++) {
+               num_pp = 0;
+               cost = 0;
+
+               mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+               /* This mask works for everything from MP1 up to num_pp PP cores; update the statistics for each. */
+               for (i = 0; i < num_pp; i++) {
+                       if (best_cost[i] >= cost) {
+                               best_cost[i] = cost;
+                               best_mask[i] = mask;
+                       }
+               }
+       }
+
+       /*
+        * If we want to enable x PP cores and x is less than the number of
+        * always-on PP cores, all of the cores we enable must be always-on cores.
+        */
+       for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+               if (i < always_on_pp_cores) {
+                       mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+                               = i + 1;
+               } else {
+                       mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+                               = always_on_pp_cores;
+               }
+       }
+
+       /* In this loop, i represents the number of non-always-on PP cores we want to enable. */
+       for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+               if (best_mask[i] == 0) {
+                       /* This MP variant is not available */
+                       continue;
+               }
+
+               for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+                       cores_in_domain[j] = 0;
+               }
+
+               for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+                       if (0 < domain_config[j]
+                           && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) {
+                               cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+                       }
+               }
+
+               /* In this loop, j represents the number of cores we have already assigned. */
+               for (j = 0; j <= i;) {
+                       /* k visits every domain to see how many PP cores remain in it. */
+                       for (k = 0; k < max_domain_id; k++) {
+                               /* If domain k is enabled in best_mask[i] and still has spare
+                                * PP cores, pick at least one core from it, then move on
+                                * to the next enabled PM domain. */
+                               if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+                                       cores_in_domain[k]--;
+                                       mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+                                       j++;
+                                       if (j > i) {
+                                               break;
+                                       }
+                               }
+                       }
+               }
+       }
+}
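
In short, the setup is a brute-force search: with max_domain_id domains there
are only 2^max_domain_id candidate masks (a few thousand at most), so every
mask is scored with mali_pm_stat_from_mask(), and best_mask[i]/best_cost[i]
keep the cheapest mask that yields at least i + 1 PP cores. The second half
then distributes the requested cores over the domains of the winning mask,
filling mali_pm_domain_power_cost_result[] row by row.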
+
+/*
+ * When we are doing core scaling,
+ * this function is called to return the best mask to
+ * achieve the best pp group power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+       MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+       _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
 }
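
A hedged usage sketch for the core-scaling path; the caller below and the way
the per-domain counts are consumed are assumptions, not part of this patch:

/* Hypothetical core-scaling caller: fetch the cheapest way to keep two
 * PP cores powered, then walk the per-domain core counts. */
static void example_scale_to_two_cores(void)
{
        int cores_per_domain[MALI_MAX_NUMBER_OF_DOMAINS];
        int i;

        mali_pm_get_best_power_cost_mask(2, cores_per_domain);

        for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
                if (0 < cores_per_domain[i]) {
                        /* keep cores_per_domain[i] PP cores in domain i */
                }
        }
}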
 
-void mali_pm_set_power_is_on(void)
+u32 mali_pm_get_current_mask(void)
 {
-       mali_power_on = MALI_TRUE;
+       return pd_mask_current;
 }
 
-mali_bool mali_pm_is_power_on(void)
+u32 mali_pm_get_wanted_mask(void)
 {
-       return mali_power_on;
+       return pd_mask_wanted;
 }
index 2c1063ab4a8c63b0d518c2ac28cab3cf2ba83d8d..fce2209dca299f2dbefc2ad087bbd86effbc7a4f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define __MALI_PM_H__
 
 #include "mali_osk.h"
+#include "mali_pm_domain.h"
 
+#define MALI_DOMAIN_INDEX_GP        0
+#define MALI_DOMAIN_INDEX_PP0       1
+#define MALI_DOMAIN_INDEX_PP1       2
+#define MALI_DOMAIN_INDEX_PP2       3
+#define MALI_DOMAIN_INDEX_PP3       4
+#define MALI_DOMAIN_INDEX_PP4       5
+#define MALI_DOMAIN_INDEX_PP5       6
+#define MALI_DOMAIN_INDEX_PP6       7
+#define MALI_DOMAIN_INDEX_PP7       8
+#define MALI_DOMAIN_INDEX_L20       9
+#define MALI_DOMAIN_INDEX_L21      10
+#define MALI_DOMAIN_INDEX_L22      11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU, or always-on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY    12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * PM module covers Mali PM core, PM domains and Mali PMU
+ */
 _mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
 void mali_pm_terminate(void);
 
-/* Callback functions registered for the runtime PMM system */
-void mali_pm_os_suspend(void);
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+               struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+               struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+                                 struct mali_group **groups,
+                                 u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+                                 u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
 void mali_pm_os_resume(void);
-void mali_pm_runtime_suspend(void);
+
+mali_bool mali_pm_runtime_suspend(void);
 void mali_pm_runtime_resume(void);
 
-void mali_pm_set_power_is_on(void);
-mali_bool mali_pm_is_power_on(void);
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+                             char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
 
+u32 mali_pm_get_current_mask(void);
+u32 mali_pm_get_wanted_mask(void);
 #endif /* __MALI_PM_H__ */
index 486e8939f666779099a28d87d2dab720e7fc63b1..a36e57a0f1f9086b9a6843efc192e6c611a593c1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_pm_domain.h"
 #include "mali_pmu.h"
 #include "mali_group.h"
+#include "mali_pm.h"
 
-static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
 
-static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+void mali_pm_domain_initialize(void)
 {
-       _mali_osk_spinlock_irq_lock(domain->lock);
+       /* Domains will be initialized/created on demand */
 }
 
-static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+void mali_pm_domain_terminate(void)
 {
-       _mali_osk_spinlock_irq_unlock(domain->lock);
-}
+       int i;
 
-MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
-{
-       domain->state = state;
+       /* Delete all domains that have been created */
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               mali_pm_domain_delete(mali_pm_domains[i]);
+               mali_pm_domains[i] = NULL;
+       }
 }
 
 struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
 {
-       struct mali_pm_domain* domain = NULL;
+       struct mali_pm_domain *domain = NULL;
        u32 domain_id = 0;
 
        domain = mali_pm_domain_get_from_mask(pmu_mask);
        if (NULL != domain) return domain;
 
-       MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+       MALI_DEBUG_PRINT(2,
+                        ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+                         pmu_mask));
 
-       domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+       domain = (struct mali_pm_domain *)_mali_osk_malloc(
+                        sizeof(struct mali_pm_domain));
        if (NULL != domain) {
-               domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
-               if (NULL == domain->lock) {
-                       _mali_osk_free(domain);
-                       return NULL;
-               }
-
-               domain->state = MALI_PM_DOMAIN_ON;
+               domain->power_is_on = MALI_FALSE;
                domain->pmu_mask = pmu_mask;
                domain->use_count = 0;
-               domain->group_list = NULL;
-               domain->group_count = 0;
-               domain->l2 = NULL;
+               _mali_osk_list_init(&domain->group_list);
+               _mali_osk_list_init(&domain->l2_cache_list);
 
                domain_id = _mali_osk_fls(pmu_mask) - 1;
                /* Verify the domain_id */
@@ -76,64 +75,44 @@ void mali_pm_domain_delete(struct mali_pm_domain *domain)
        if (NULL == domain) {
                return;
        }
-       _mali_osk_spinlock_irq_term(domain->lock);
-
-       _mali_osk_free(domain);
-}
 
-void mali_pm_domain_terminate(void)
-{
-       int i;
+       _mali_osk_list_delinit(&domain->group_list);
+       _mali_osk_list_delinit(&domain->l2_cache_list);
 
-       /* Delete all domains */
-       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
-               mali_pm_domain_delete(mali_pm_domains[i]);
-       }
+       _mali_osk_free(domain);
 }
 
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+                             struct mali_group *group)
 {
-       struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
-       struct mali_group *next;
-
-       if (NULL == domain) return;
-
+       MALI_DEBUG_ASSERT_POINTER(domain);
        MALI_DEBUG_ASSERT_POINTER(group);
 
-       ++domain->group_count;
-       next = domain->group_list;
-
-       domain->group_list = group;
-
-       group->pm_domain_list = next;
-
-       mali_group_set_pm_domain(group, domain);
-
-       /* Get pm domain ref after mali_group_set_pm_domain */
-       mali_group_get_pm_domain_ref(group);
+       /*
+        * Use addtail because virtual group is created last and it needs
+        * to be at the end of the list (in order to be activated after
+        * all children).
+        */
+       _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
 }
 
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+                                struct mali_l2_cache_core *l2_cache)
 {
-       struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
-
-       if (NULL == domain) return;
-
-       MALI_DEBUG_ASSERT(NULL == domain->l2);
-       MALI_DEBUG_ASSERT(NULL != l2);
-
-       domain->l2 = l2;
-
-       mali_l2_cache_set_pm_domain(l2, domain);
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       MALI_DEBUG_ASSERT_POINTER(l2_cache);
+       _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
 }
 
 struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
 {
        u32 id = 0;
 
-       if (0 == mask) return NULL;
+       if (0 == mask) {
+               return NULL;
+       }
 
-       id = _mali_osk_fls(mask)-1;
+       id = _mali_osk_fls(mask) - 1;
 
        MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
        /* Verify that pmu_mask only one bit is set */
@@ -149,93 +128,82 @@ struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
        return mali_pm_domains[id];
 }
 
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
 {
-       if (NULL == domain) return;
+       MALI_DEBUG_ASSERT_POINTER(domain);
 
-       mali_pm_domain_lock(domain);
-       ++domain->use_count;
+       if (0 == domain->use_count) {
+               _mali_osk_pm_dev_ref_get_async();
+       }
 
-       if (MALI_PM_DOMAIN_ON != domain->state) {
-               /* Power on */
-               struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+       ++domain->use_count;
+       MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
 
-               MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+       /* Return our mask so caller can check this against wanted mask */
+       return domain->pmu_mask;
+}
 
-               if (NULL != pmu) {
-                       _mali_osk_errcode_t err;
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
 
-                       err = mali_pmu_power_up(pmu, domain->pmu_mask);
+       --domain->use_count;
+       MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
 
-                       if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
-                               MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
-                                                 domain->pmu_mask));
-                       }
-               }
-               mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
-       } else {
-               MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+       if (0 == domain->use_count) {
+               _mali_osk_pm_dev_ref_put();
        }
 
-       mali_pm_domain_unlock(domain);
+       /*
+        * Return the PMU mask which could now be powered down
+        * (the bit for this domain).
+        * Acting on it is the responsibility of the caller (mali_pm).
+        */
+       return (0 == domain->use_count ? domain->pmu_mask : 0);
 }
 
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
 {
-       if (NULL == domain) return;
-
-       mali_pm_domain_lock(domain);
-       --domain->use_count;
-
-       if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
-               /* Power off */
-               struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
-               MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+       u32 id = 0;
 
-               mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
 
-               if (NULL != pmu) {
-                       _mali_osk_errcode_t err;
+       id = _mali_osk_fls(domain->pmu_mask) - 1;
 
-                       err = mali_pmu_power_down(pmu, domain->pmu_mask);
+       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+       /* Verify that only one bit of pmu_mask is set */
+       MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+       /* Verify that we have stored the domain at right id/index */
+       MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
 
-                       if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
-                               MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
-                                                 domain->pmu_mask));
-                       }
-               }
-       }
-       mali_pm_domain_unlock(domain);
+       return id;
 }
+#endif
 
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
 {
-       mali_bool is_powered = MALI_TRUE;
+       int i;
 
-       /* Take a reference without powering on */
-       if (NULL != domain) {
-               mali_pm_domain_lock(domain);
-               ++domain->use_count;
+       for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+               if (NULL == mali_pm_domains[i]) {
+                       /* Nothing to check */
+                       continue;
+               }
 
-               if (MALI_PM_DOMAIN_ON != domain->state) {
-                       is_powered = MALI_FALSE;
+               if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+                       /* Not ready for suspend! */
+                       return MALI_FALSE;
                }
-               mali_pm_domain_unlock(domain);
-       }
 
-       if(!_mali_osk_pm_dev_ref_add_no_power_on()) {
-               is_powered = MALI_FALSE;
+               if (0 != mali_pm_domains[i]->use_count) {
+                       /* Not ready for suspend! */
+                       return MALI_FALSE;
+               }
        }
 
-       return is_powered;
-}
-
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
-{
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-
-       if (NULL != domain) {
-               mali_pm_domain_ref_put(domain);
-       }
+       return MALI_TRUE;
 }
+#endif
index c5776dd01dbda8055a5130d9c3e8141c79a46353..3097043d7640385effb2f6bf22e7434251f27f32 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_group.h"
 #include "mali_pmu.h"
 
-typedef enum {
-       MALI_PM_DOMAIN_ON,
-       MALI_PM_DOMAIN_OFF,
-} mali_pm_domain_state;
-
+/* Instances are protected by PM state lock */
 struct mali_pm_domain {
-       mali_pm_domain_state state;
-       _mali_osk_spinlock_irq_t *lock;
-
+       mali_bool power_is_on;
        s32 use_count;
-
        u32 pmu_mask;
 
-       int group_count;
-       struct mali_group *group_list;
+       /* Zero or more groups can belong to this domain */
+       _mali_osk_list_t group_list;
 
-       struct mali_l2_cache_core *l2;
+       /* Zero or more L2 caches can belong to this domain */
+       _mali_osk_list_t l2_cache_list;
 };
 
-struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
 
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group);
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
 
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
 void mali_pm_domain_delete(struct mali_pm_domain *domain);
 
-void mali_pm_domain_terminate(void);
+void mali_pm_domain_add_l2_cache(
+       struct mali_pm_domain *domain,
+       struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+                             struct mali_group *group);
 
-/** Get PM domain from domain ID
- */
 struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
 struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
 
 /* Ref counting */
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+       struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->power_is_on;
+}
 
-MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+       struct mali_pm_domain *domain,
+       mali_bool power_is_on)
 {
-       return domain->l2;
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       domain->power_is_on = power_is_on;
 }
 
-MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+       struct mali_pm_domain *domain)
 {
-       return domain->state;
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->use_count;
 }
 
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+       MALI_DEBUG_ASSERT_POINTER(domain);
+       return domain->pmu_mask;
+}
+#endif
 
-#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
-               NULL != (group); (group) = (group)->pm_domain_list)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
 
 #endif /* __MALI_PM_DOMAIN_H__ */
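
The header rework above drops the hand-rolled group_list pointer chain (and its MALI_PM_DOMAIN_FOR_EACH_GROUP macro) in favour of _mali_osk_list_t, the driver's intrusive doubly linked list in the style of the kernel's list_head. A standalone sketch of that idiom, with hypothetical type names, showing how zero or more groups hang off one domain without per-node allocation:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal intrusive list, modelled on the kernel list_head pattern. */
    struct list_node { struct list_node *prev, *next; };

    static void list_init(struct list_node *h) { h->prev = h->next = h; }
    static void list_add_tail(struct list_node *n, struct list_node *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct domain { struct list_node group_list; };
    struct group  { int id; struct list_node pm_domain_list; };

    int main(void)
    {
            struct domain d;
            struct group g0 = { .id = 0 }, g1 = { .id = 1 };

            list_init(&d.group_list);
            list_add_tail(&g0.pm_domain_list, &d.group_list);
            list_add_tail(&g1.pm_domain_list, &d.group_list);

            /* Walk the domain's groups, recovering each node's container. */
            for (struct list_node *n = d.group_list.next; n != &d.group_list; n = n->next)
                    printf("group %d\n", container_of(n, struct group, pm_domain_list)->id);
            return 0;
    }
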
index 3f99465fe8e955905a9b1d252b76cd7e3d23a315..4e4af08fcfd5e4892682d7a95867facdc9e1e545 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_pm.h"
 #include "mali_osk_mali.h"
 
-u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS]= {0};
+struct mali_pmu_core *mali_global_pmu_core = NULL;
 
-static u32 mali_pmu_detect_mask(void);
-
-/** @brief Mali in-built PMU hardware info; the PMU hardware knows the power mask of the cores
- */
-struct mali_pmu_core {
-       struct mali_hw_core hw_core;
-       _mali_osk_spinlock_t *lock;
-       u32 registered_cores_mask;
-       u32 active_cores_mask;
-       u32 switch_delay;
-};
-
-static struct mali_pmu_core *mali_global_pmu_core = NULL;
-
-/** @brief Register layout for hardware PMU
- */
-typedef enum {
-       PMU_REG_ADDR_MGMT_POWER_UP                  = 0x00,     /*< Power up register */
-       PMU_REG_ADDR_MGMT_POWER_DOWN                = 0x04,     /*< Power down register */
-       PMU_REG_ADDR_MGMT_STATUS                    = 0x08,     /*< Core sleep status register */
-       PMU_REG_ADDR_MGMT_INT_MASK                  = 0x0C,     /*< Interrupt mask register */
-       PMU_REG_ADDR_MGMT_INT_RAWSTAT               = 0x10,     /*< Interrupt raw status register */
-       PMU_REG_ADDR_MGMT_INT_CLEAR                 = 0x18,     /*< Interrupt clear register */
-       PMU_REG_ADDR_MGMT_SW_DELAY                  = 0x1C,     /*< Switch delay register */
-       PMU_REGISTER_ADDRESS_SPACE_SIZE             = 0x28,     /*< Size of register space */
-} pmu_reg_addr_mgmt_addr;
-
-#define PMU_REG_VAL_IRQ 1
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+       struct mali_pmu_core *pmu);
 
 struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
 {
-       struct mali_pmu_core* pmu;
+       struct mali_pmu_core *pmu;
 
        MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
        MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
 
-       pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+       pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+                     sizeof(struct mali_pmu_core));
        if (NULL != pmu) {
-               pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
-               if (NULL != pmu->lock) {
-                       pmu->registered_cores_mask = mali_pmu_detect_mask();
-                       pmu->active_cores_mask = pmu->registered_cores_mask;
-
-                       if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
-                               _mali_osk_errcode_t err;
-                               struct _mali_osk_device_data data = { 0, };
-
-                               err = _mali_osk_device_data_get(&data);
-                               if (_MALI_OSK_ERR_OK == err) {
-                                       pmu->switch_delay = data.pmu_switch_delay;
-                                       mali_global_pmu_core = pmu;
-                                       return pmu;
-                               }
-                               mali_hw_core_delete(&pmu->hw_core);
-                       }
-                       _mali_osk_spinlock_term(pmu->lock);
+               pmu->registered_cores_mask = 0; /* to be set later */
+
+               if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+                               resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+                       pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+                       mali_global_pmu_core = pmu;
+
+                       return pmu;
                }
                _mali_osk_free(pmu);
        }
@@ -89,318 +56,211 @@ void mali_pmu_delete(struct mali_pmu_core *pmu)
 {
        MALI_DEBUG_ASSERT_POINTER(pmu);
        MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
        MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
 
-       _mali_osk_spinlock_term(pmu->lock);
+       mali_global_pmu_core = NULL;
+
        mali_hw_core_delete(&pmu->hw_core);
        _mali_osk_free(pmu);
-       mali_global_pmu_core = NULL;
 }
 
-static void mali_pmu_lock(struct mali_pmu_core *pmu)
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
 {
-       _mali_osk_spinlock_lock(pmu->lock);
+       pmu->registered_cores_mask = mask;
 }
-static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
 {
-       _mali_osk_spinlock_unlock(pmu->lock);
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+       /* Setup the desired defaults */
+       mali_hw_core_register_write_relaxed(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_INT_MASK, 0);
+       mali_hw_core_register_write_relaxed(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
 }
 
-static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
 {
-       u32 rawstat;
-       u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+       u32 stat;
 
-       MALI_DEBUG_ASSERT(pmu);
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
 
-       /* Wait for the command to complete */
-       do {
-               rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
-               --timeout;
-       } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+       mali_pm_exec_lock();
 
-       MALI_DEBUG_ASSERT(0 < timeout);
-       if (0 == timeout) {
-               return _MALI_OSK_ERR_TIMEOUT;
-       }
+       mali_pmu_reset(pmu);
 
-       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+       /* Now simply power up the domains which are marked as powered down */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       mali_pmu_power_up(pmu, stat);
 
-       return _MALI_OSK_ERR_OK;
+       mali_pm_exec_unlock();
 }
 
-static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
 {
        u32 stat;
-       _mali_osk_errcode_t err;
-#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
-       u32 current_domain;
-#endif
 
        MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
-                               & PMU_REG_VAL_IRQ));
-
-       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-       stat &= pmu->registered_cores_mask;
-       if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
-
-#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
-       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);
-
-       err = mali_pmu_wait_for_command_finish(pmu);
-       if (_MALI_OSK_ERR_OK != err) {
-               return err;
-       }
-#else
-       for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
-               if (current_domain & mask & stat) {
-                       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);
-
-                       err = mali_pmu_wait_for_command_finish(pmu);
-                       if (_MALI_OSK_ERR_OK != err) {
-                               return err;
-                       }
-               }
-       }
-#endif
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
 
-#if defined(DEBUG)
-       /* Get power status of cores */
-       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-       stat &= pmu->registered_cores_mask;
+       mali_pm_exec_lock();
 
-       MALI_DEBUG_ASSERT(0 == (stat & mask));
-       MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
-#endif /* defined(DEBUG) */
+       /* Now simply power down the domains which are marked as powered up */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
 
-       return _MALI_OSK_ERR_OK;
+       mali_pm_exec_unlock();
 }
 
-static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
 {
        u32 stat;
        _mali_osk_errcode_t err;
 
        MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
-                               & PMU_REG_VAL_IRQ));
-
-       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-       stat &= pmu->registered_cores_mask;
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+       MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+                               PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+                               PMU_REG_VAL_IRQ));
+
+       MALI_DEBUG_PRINT(3,
+                        ("PMU power down: ...................... [%s]\n",
+                         mali_pm_mask_to_string(mask)));
+
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+
+       /*
+        * Assert that we are not powering down domains which are already
+        * powered down.
+        */
+       MALI_DEBUG_ASSERT(0 == (stat & mask));
 
        if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
 
-       mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
 
-       /* Do not wait for interrupt on Mali-300/400 if all domains are powered off
-        * by our power down command, because the HW will simply not generate an
-        * interrupt in this case.*/
-       if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
+       /*
+        * Do not wait for interrupt on Mali-300/400 if all domains are
+        * powered off by our power down command, because the HW will simply
+        * not generate an interrupt in this case.
+        */
+       if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) {
                err = mali_pmu_wait_for_command_finish(pmu);
                if (_MALI_OSK_ERR_OK != err) {
                        return err;
                }
        } else {
-               mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+               mali_hw_core_register_write(&pmu->hw_core,
+                                           PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
        }
-#if defined(DEBUG)
-       /* Get power status of cores */
-       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-       stat &= pmu->registered_cores_mask;
 
+#if defined(DEBUG)
+       /* Verify power status of domains after power down */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
        MALI_DEBUG_ASSERT(mask == (stat & mask));
 #endif
 
        return _MALI_OSK_ERR_OK;
 }
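
The branch above encodes a hardware quirk: a Mali-300/400 PMU raises no completion interrupt when a power-down command switches off every remaining domain, so the driver may only wait when some domain stays up, while Mali-450/470 always signal. The decision predicate, isolated as a sketch with illustrative masks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stat: domains already asleep; mask: domains being powered down.
     * Waiting is only safe if mask|stat leaves some registered domain up,
     * or on Mali-450/470, which always raise the completion IRQ. */
    static bool must_wait_for_irq(bool is_mali450_or_470,
                                  uint32_t registered, uint32_t mask, uint32_t stat)
    {
            return is_mali450_or_470 || registered != (mask | stat);
    }

    int main(void)
    {
            printf("%d\n", must_wait_for_irq(false, 0x7, 0x6, 0x1)); /* 0: all off, skip wait */
            printf("%d\n", must_wait_for_irq(false, 0x7, 0x2, 0x1)); /* 1: one domain stays up */
            return 0;
    }
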
 
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
 {
+       u32 stat;
        _mali_osk_errcode_t err;
-       u32 cores_off_mask, cores_on_mask, stat;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       u32 current_domain;
+#endif
+
+       MALI_DEBUG_ASSERT_POINTER(pmu);
+       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+       MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+       MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+                               PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+                               PMU_REG_VAL_IRQ));
 
-       mali_pmu_lock(pmu);
+       MALI_DEBUG_PRINT(3,
+                        ("PMU power up: ........................ [%s]\n",
+                         mali_pm_mask_to_string(mask)));
 
-       /* Setup the desired defaults */
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       stat &= pmu->registered_cores_mask;
+       if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
 
-       /* Get power status of cores */
-       stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       /*
+        * Assert that we are only powering up domains which are currently
+        * powered down.
+        */
+       MALI_DEBUG_ASSERT(mask == (stat & mask));
 
-       cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
-       cores_on_mask  = pmu->registered_cores_mask &  (stat & pmu->active_cores_mask);
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_POWER_UP, mask);
 
-       if (0 != cores_off_mask) {
-               err = mali_pmu_power_down_internal(pmu, cores_off_mask);
-               if (_MALI_OSK_ERR_OK != err) return err;
+       err = mali_pmu_wait_for_command_finish(pmu);
+       if (_MALI_OSK_ERR_OK != err) {
+               return err;
        }
+#else
+       for (current_domain = 1;
+            current_domain <= pmu->registered_cores_mask;
+            current_domain <<= 1) {
+               if (current_domain & mask & stat) {
+                       mali_hw_core_register_write(&pmu->hw_core,
+                                                   PMU_REG_ADDR_MGMT_POWER_UP,
+                                                   current_domain);
 
-       if (0 != cores_on_mask) {
-               err = mali_pmu_power_up_internal(pmu, cores_on_mask);
-               if (_MALI_OSK_ERR_OK != err) return err;
+                       err = mali_pmu_wait_for_command_finish(pmu);
+                       if (_MALI_OSK_ERR_OK != err) {
+                               return err;
+                       }
+               }
        }
+#endif
 
 #if defined(DEBUG)
-       {
-               stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-               stat &= pmu->registered_cores_mask;
-
-               MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
-       }
+       /* Verify power status of domains after power up */
+       stat = mali_hw_core_register_read(&pmu->hw_core,
+                                         PMU_REG_ADDR_MGMT_STATUS);
+       MALI_DEBUG_ASSERT(0 == (stat & mask));
 #endif /* defined(DEBUG) */
 
-       mali_pmu_unlock(pmu);
-
        return _MALI_OSK_ERR_OK;
 }
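
Without CONFIG_MALI_PMU_PARALLEL_POWER_UP, the loop above brings domains up one bit at a time, waiting for each power-up command to finish before issuing the next. A standalone model of the bit walk (masks are illustrative; the overflow guard on the shift is an added safety net, not in the driver's loop):

    #include <stdint.h>
    #include <stdio.h>

    /* Visit each registered domain bit, lowest first, and "power up" only
     * those that are both requested (mask) and currently asleep (stat). */
    static void power_up_serial(uint32_t registered, uint32_t mask, uint32_t stat)
    {
            for (uint32_t domain = 1; domain != 0 && domain <= registered; domain <<= 1) {
                    if (domain & mask & stat)
                            printf("power up domain bit 0x%x\n", (unsigned)domain);
            }
    }

    int main(void)
    {
            /* hypothetical layout: GP=bit0, PP0=bit1, L2=bit2; PP0 and L2 asleep */
            power_up_serial(0x7, 0x6, 0x6);
            return 0;
    }
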
 
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+       struct mali_pmu_core *pmu)
 {
-       _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
-
-       /* Make sure we have a valid power domain mask */
-       if (mask > pmu->registered_cores_mask) {
-               return _MALI_OSK_ERR_INVALID_ARGS;
-       }
-
-       mali_pmu_lock(pmu);
-
-       MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
-
-       pmu->active_cores_mask &= ~mask;
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-       if (!mali_pm_is_power_on()) {
-               /* Don't touch hardware if all of Mali is powered off. */
-               _mali_osk_pm_dev_ref_dec_no_power_on();
-               mali_pmu_unlock(pmu);
-
-               MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
-
-               return _MALI_OSK_ERR_BUSY;
-       }
-
-       err = mali_pmu_power_down_internal(pmu, mask);
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-       mali_pmu_unlock(pmu);
-
-       return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
-{
-       _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0 );
-
-       /* Make sure we have a valid power domain mask */
-       if (mask & ~pmu->registered_cores_mask) {
-               return _MALI_OSK_ERR_INVALID_ARGS;
-       }
-
-       mali_pmu_lock(pmu);
-
-       MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
-
-       pmu->active_cores_mask |= mask;
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-       if (!mali_pm_is_power_on()) {
-               /* Don't touch hardware if all of Mali is powered off. */
-               _mali_osk_pm_dev_ref_dec_no_power_on();
-               mali_pmu_unlock(pmu);
-
-               MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
-
-               return _MALI_OSK_ERR_BUSY;
-       }
-
-       err = mali_pmu_power_up_internal(pmu, mask);
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-       mali_pmu_unlock(pmu);
-
-       return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
-{
-       _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
-       mali_pmu_lock(pmu);
-
-       /* Setup the desired defaults in case we were called before mali_pmu_reset() */
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
-       err = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);
-
-       mali_pmu_unlock(pmu);
-
-       return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
-{
-       _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
-       mali_pmu_lock(pmu);
-
-       /* Setup the desired defaults in case we were called before mali_pmu_reset() */
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
-       mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
-       err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);
-
-       mali_pmu_unlock(pmu);
-       return err;
-}
-
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
-{
-       return mali_global_pmu_core;
-}
-
-static u32 mali_pmu_detect_mask(void)
-{
-       int dynamic_config_pp = 0;
-       int dynamic_config_l2 = 0;
-       int i = 0;
-       u32 mask = 0;
-
-       /* Check that the PM domain config matches the actual PP cores and L2 caches, and collect domain info */
-       mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+       u32 rawstat;
+       u32 timeout = MALI_REG_POLL_COUNT_SLOW;
 
-       for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
-               mask |= mali_pmu_get_domain_mask(i);
+       MALI_DEBUG_ASSERT(pmu);
 
-               if (0x0 != mali_pmu_get_domain_mask(i)) {
-                       dynamic_config_pp++;
-               }
-       }
+       /* Wait for the command to complete */
+       do {
+               rawstat = mali_hw_core_register_read(&pmu->hw_core,
+                                                    PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+               --timeout;
+       } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
 
-       for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
-               mask |= mali_pmu_get_domain_mask(i);
+       MALI_DEBUG_ASSERT(0 < timeout);
 
-               if (0x0 != mali_pmu_get_domain_mask(i)) {
-                       dynamic_config_l2++;
-               }
+       if (0 == timeout) {
+               return _MALI_OSK_ERR_TIMEOUT;
        }
 
-       MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+       mali_hw_core_register_write(&pmu->hw_core,
+                                   PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
 
-       return mask;
+       return _MALI_OSK_ERR_OK;
 }
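
mali_pmu_wait_for_command_finish() is a bounded poll: spin on the raw interrupt status until the IRQ bit appears or the budget runs out, then acknowledge with a write-one-to-clear. A self-contained model, where the read callback stands in for the MMIO read and the W1C acknowledge is left as a comment:

    #include <stdint.h>
    #include <stdio.h>

    #define PMU_IRQ_BIT 0x1u
    #define POLL_BUDGET 1000000u

    /* Poll rawstat via the supplied reader until the IRQ bit is set or the
     * budget is exhausted. Returns 0 on success, -1 on timeout. */
    static int wait_for_command_finish(uint32_t (*read_rawstat)(void))
    {
            uint32_t timeout = POLL_BUDGET;
            uint32_t rawstat;

            do {
                    rawstat = read_rawstat();
                    --timeout;
            } while ((rawstat & PMU_IRQ_BIT) == 0 && timeout > 0);

            if ((rawstat & PMU_IRQ_BIT) == 0)
                    return -1; /* _MALI_OSK_ERR_TIMEOUT in the driver */

            /* here the driver writes PMU_REG_VAL_IRQ to INT_CLEAR (W1C) */
            return 0;
    }

    static uint32_t fake_rawstat(void) { return PMU_IRQ_BIT; } /* completes at once */

    int main(void)
    {
            printf("result: %d\n", wait_for_command_finish(fake_rawstat));
            return 0;
    }
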
index cdbd6742803cde8ec8f474024c40c52e51691b50..9b5a73a32f4ca763c09ad3499b0c782c52103b8c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define __MALI_PMU_H__
 
 #include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
 
-#define MALI_GP_DOMAIN_INDEX   0
-#define MALI_PP0_DOMAIN_INDEX  1
-#define MALI_PP1_DOMAIN_INDEX  2
-#define MALI_PP2_DOMAIN_INDEX  3
-#define MALI_PP3_DOMAIN_INDEX  4
-#define MALI_PP4_DOMAIN_INDEX  5
-#define MALI_PP5_DOMAIN_INDEX  6
-#define MALI_PP6_DOMAIN_INDEX  7
-#define MALI_PP7_DOMAIN_INDEX  8
-#define MALI_L20_DOMAIN_INDEX  9
-#define MALI_L21_DOMAIN_INDEX  10
-#define MALI_L22_DOMAIN_INDEX  11
-
-#define MALI_MAX_NUMBER_OF_DOMAINS     12
-
-/* Record the domain config from the customer or default config */
-extern u16 mali_pmu_global_domain_config[];
-
-static inline u16 mali_pmu_get_domain_mask(u32 index)
-{
-       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
-
-       return mali_pmu_global_domain_config[index];
-}
-
-static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
-{
-       MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+/** @brief Mali in-built PMU hardware info; the PMU hardware knows the power mask of the cores
+ */
+struct mali_pmu_core {
+       struct mali_hw_core hw_core;
+       u32 registered_cores_mask;
+       u32 switch_delay;
+};
 
-       mali_pmu_global_domain_config[index] = value;
-}
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+       PMU_REG_ADDR_MGMT_POWER_UP                  = 0x00,     /*< Power up register */
+       PMU_REG_ADDR_MGMT_POWER_DOWN                = 0x04,     /*< Power down register */
+       PMU_REG_ADDR_MGMT_STATUS                    = 0x08,     /*< Core sleep status register */
+       PMU_REG_ADDR_MGMT_INT_MASK                  = 0x0C,     /*< Interrupt mask register */
+       PMU_REG_ADDR_MGMT_INT_RAWSTAT               = 0x10,     /*< Interrupt raw status register */
+       PMU_REG_ADDR_MGMT_INT_CLEAR                 = 0x18,     /*< Interrupt clear register */
+       PMU_REG_ADDR_MGMT_SW_DELAY                  = 0x1C,     /*< Switch delay register */
+       PMU_REGISTER_ADDRESS_SPACE_SIZE             = 0x28,     /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
 
-static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
-{
-       _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
-}
+#define PMU_REG_VAL_IRQ 1
 
-struct mali_pmu_core;
+extern struct mali_pmu_core *mali_global_pmu_core;
 
 /** @brief Initialisation of MALI PMU
  *
@@ -76,59 +64,60 @@ struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
  */
 void mali_pmu_delete(struct mali_pmu_core *pmu);
 
-/** @brief Reset PMU core
+/** @brief Set registered cores mask
  *
- * @param pmu Pointer to PMU core object to reset
- * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
  */
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
 
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * Called to power down the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_up.
+/** @brief Retrieves the Mali PMU core object (if any)
  *
- * @param pmu Pointer to PMU core object to power down
- * @param mask Mask specifying which power domains to power down
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
+ * @return The Mali PMU object, or NULL if no PMU exists.
  */
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+       return mali_global_pmu_core;
+}
 
-/** @brief MALI GPU power up using MALI in-built PMU
+/** @brief Reset PMU core
  *
- * Called to power up the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_down.
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
  *
- * @param pmu Pointer to PMU core object to power up
- * @param mask Mask specifying which power domains to power up
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ * @param pmu Pointer to PMU core object
  */
-_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+       u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+       return ((~stat) & pmu->registered_cores_mask);
+}
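
Note the inversion in mali_pmu_get_mask() above: a set bit in the PMU STATUS register means the domain is asleep, so the powered-up mask is the complement, clipped to the registered bits. In sketch form:

    #include <stdint.h>
    #include <stdio.h>

    /* STATUS bit set = domain powered down, so powered-up = ~STATUS,
     * restricted to domains that actually exist. */
    static uint32_t powered_up_mask(uint32_t status, uint32_t registered)
    {
            return ~status & registered;
    }

    int main(void)
    {
            /* hypothetical: 3 domains registered, bit1 asleep */
            printf("0x%x\n", (unsigned)powered_up_mask(0x2, 0x7)); /* prints 0x5 */
            return 0;
    }
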
 
 /** @brief MALI GPU power down using MALI in-built PMU
  *
- * called to power down all cores
+ * Called to power down the specified cores.
  *
  * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
  */
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
 
 /** @brief MALI GPU power up using MALI in-built PMU
  *
- * called to power up all cores
+ * Called to power up the specified cores.
  *
  * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
  */
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
-
-/** @brief Retrieves the Mali PMU core object (if any)
- *
- * @return The Mali PMU object, or NULL if no PMU exists.
- */
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
 
 #endif /* __MALI_PMU_H__ */
index bdfeb5cd016d399f17dfe4374775af0371964eb2..0d748d56794bf81996864f6f9f040f31d47528b3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "regs/mali_200_regs.h"
 #include "mali_kernel_common.h"
 #include "mali_kernel_core.h"
-#include "mali_dma.h"
-#include "mali_kernel_utilization.h"
 #if defined(CONFIG_MALI400_PROFILING)
 #include "mali_osk_profiling.h"
 #endif
-#include "platform_pmm.h"
 
 /* Number of frame registers on Mali-200 */
 #define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
 /* Number of frame registers on Mali-300 and later */
 #define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
 
-static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
 static u32 mali_global_num_pp_cores = 0;
 
 /* Interrupt handlers */
@@ -36,7 +33,7 @@ static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
 
 struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
 {
-       struct mali_pp_core* core = NULL;
+       struct mali_pp_core *core = NULL;
 
        MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
        MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
@@ -67,12 +64,12 @@ struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct
                                        MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
 
                                        core->irq = _mali_osk_irq_init(resource->irq,
-                                                                      mali_group_upper_half_pp,
-                                                                      group,
-                                                                      mali_pp_irq_probe_trigger,
-                                                                      mali_pp_irq_probe_ack,
-                                                                      core,
-                                                                      resource->description);
+                                                                      mali_group_upper_half_pp,
+                                                                      group,
+                                                                      mali_pp_irq_probe_trigger,
+                                                                      mali_pp_irq_probe_ack,
+                                                                      core,
+                                                                      resource->description);
                                        if (NULL != core->irq) {
                                                mali_global_pp_cores[mali_global_num_pp_cores] = core;
                                                mali_global_num_pp_cores++;
@@ -205,6 +202,7 @@ static const u32 mali_perf_cnt_enable_reset_value = 0;
 _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
 {
        /* Bus must be stopped before calling this function */
+       const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
        const u32 reset_invalid_value = 0xC0FFE000;
        const u32 reset_check_value = 0xC01A0000;
        int i;
@@ -213,7 +211,7 @@ _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
        MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
 
        /* Set register to a bogus value. The register will be used to detect when reset is complete */
-       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+       mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value);
        mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
 
        /* Force core to reset */
@@ -221,8 +219,8 @@ _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
 
        /* Wait for reset to be complete */
        for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
-               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
-               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW)) {
+               mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+               if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
                        break;
                }
        }
@@ -231,7 +229,7 @@ _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
                MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
        }
 
-       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+       mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */
        /* Re-enable interrupts */
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
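
The hard-reset path above keeps its completion trick but moves it to a new scratch register (PERF_CNT_0_LIMIT instead of WRITE_BOUNDARY_LOW): park a bogus value, assert reset, then repeatedly write a check value and read it back; the write only sticks once the core is out of reset. Modelled with a fake register that drops writes while "in reset":

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RESET_INVALID 0xC0FFE000u
    #define RESET_CHECK   0xC01A0000u
    #define POLL_COUNT    100

    static uint32_t scratch_reg;
    static int cycles_in_reset = 3; /* writes are dropped while "in reset" */

    static void reg_write(uint32_t v)
    {
            if (cycles_in_reset > 0) { cycles_in_reset--; return; }
            scratch_reg = v;
    }
    static uint32_t reg_read(void) { return scratch_reg; }

    static bool wait_reset_complete(void)
    {
            scratch_reg = RESET_INVALID;        /* park a bogus value pre-reset */
            for (int i = 0; i < POLL_COUNT; i++) {
                    reg_write(RESET_CHECK);     /* ignored until reset finishes */
                    if (reg_read() == RESET_CHECK)
                            return true;        /* write stuck: core is alive */
            }
            return false;
    }

    int main(void)
    {
            printf("reset complete: %d\n", wait_reset_complete());
            return 0;
    }
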
@@ -256,7 +254,8 @@ _mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
        u32 rawstat = 0;
 
        for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
-               if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+               u32 status =  mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+               if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
                        rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
                        if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
                                break;
@@ -266,7 +265,7 @@ _mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
 
        if (i == MALI_REG_POLL_COUNT_FAST) {
                MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
-                                 core->hw_core.description, rawstat));
+                                 core->hw_core.description, rawstat));
                return _MALI_OSK_ERR_FAULT;
        }
 
@@ -283,89 +282,6 @@ _mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
        return mali_pp_reset_wait(core);
 }
 
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
-                                 mali_bool restart_virtual, mali_dma_cmd_buf *buf)
-{
-       u32 relative_address;
-       u32 start_index;
-       u32 nr_of_regs;
-       u32 *frame_registers = mali_pp_job_get_frame_registers(job);
-       u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
-       u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
-       u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
-       u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
-       u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
-
-       MALI_DEBUG_ASSERT_POINTER(core);
-
-       /* Write frame registers */
-
-       /*
-        * There are two frame registers which are different for each sub job:
-        * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
-        * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
-        */
-       mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
-
-       /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
-       if (!mali_pp_job_is_virtual(job) || restart_virtual) {
-               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
-       }
-
-       /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
-       relative_address = MALI200_REG_ADDR_RSW;
-       start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
-       nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
-
-       mali_dma_write_array_conditional(buf, &core->hw_core,
-                                        relative_address, &frame_registers[start_index],
-                                        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
-       /* MALI200_REG_ADDR_STACK_SIZE */
-       relative_address = MALI200_REG_ADDR_STACK_SIZE;
-       start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
-
-       mali_dma_write_conditional(buf, &core->hw_core,
-                                  relative_address, frame_registers[start_index],
-                                  mali_frame_registers_reset_values[start_index]);
-
-       /* Skip 2 reserved registers */
-
-       /* Write remaining registers */
-       relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
-       start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
-       nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
-
-       mali_dma_write_array_conditional(buf, &core->hw_core,
-                                        relative_address, &frame_registers[start_index],
-                                        nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
-       /* Write WBx registers */
-       if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
-               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
-       }
-
-       if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
-               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
-       }
-
-       if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
-               mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
-       }
-
-       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
-               mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
-               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
-       }
-       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
-               mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
-               mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
-       }
-
-       /* This is the command that starts the core. */
-       mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
-}
-
 void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
 {
        u32 relative_address;
@@ -400,16 +316,16 @@ void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 s
        nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
 
        mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
-               relative_address, &frame_registers[start_index],
-               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+                       relative_address, &frame_registers[start_index],
+                       nr_of_regs, &mali_frame_registers_reset_values[start_index]);
 
        /* MALI200_REG_ADDR_STACK_SIZE */
        relative_address = MALI200_REG_ADDR_STACK_SIZE;
        start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
 
        mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
-               relative_address, frame_registers[start_index],
-               mali_frame_registers_reset_values[start_index]);
+                       relative_address, frame_registers[start_index],
+                       mali_frame_registers_reset_values[start_index]);
 
        /* Skip 2 reserved registers */
 
@@ -419,8 +335,8 @@ void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 s
        nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
 
        mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
-               relative_address, &frame_registers[start_index],
-               nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+                       relative_address, &frame_registers[start_index],
+                       nr_of_regs, &mali_frame_registers_reset_values[start_index]);
 
        /* Write WBx registers */
        if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
@@ -445,7 +361,7 @@ void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 s
        }
 
 #ifdef CONFIG_MALI400_HEATMAPS_ENABLED
-       if(job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+       if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
                mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
                mali_hw_core_register_write_relaxed(&core->hw_core,  MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
        }
@@ -456,8 +372,16 @@ void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 s
        /* Adding barrier to make sure all register writes are finished */
        _mali_osk_write_mem_barrier();
 
-       /* This is the command that starts the core. */
+       /* This is the command that starts the core.
+        *
+        * Don't actually run the job if PROFILING_SKIP_PP_JOBS is set, just
+        * force core to assert the completion interrupt.
+        */
+#if !defined(PROFILING_SKIP_PP_JOBS)
        mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
 
        /* Adding barrier to make sure previous register writes are finished */
        _mali_osk_write_mem_barrier();
@@ -469,7 +393,7 @@ u32 mali_pp_core_get_version(struct mali_pp_core *core)
        return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
 }
 
-struct mali_pp_core* mali_pp_get_global_pp_core(u32 index)
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
 {
        if (mali_global_num_pp_cores > index) {
                return mali_global_pp_cores[index];
@@ -488,7 +412,7 @@ static void mali_pp_irq_probe_trigger(void *data)
 {
        struct mali_pp_core *core = (struct mali_pp_core *)data;
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
-       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR);
        _mali_osk_mem_barrier();
 }
 
@@ -498,8 +422,8 @@ static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
        u32 irq_readout;
 
        irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
-       if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout) {
-               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+       if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) {
+               mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR);
                _mali_osk_mem_barrier();
                return _MALI_OSK_ERR_OK;
        }
@@ -530,7 +454,7 @@ static void mali_pp_print_registers(struct mali_pp_core *core)
 #if 0
 void mali_pp_print_state(struct mali_pp_core *core)
 {
-       MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+       MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
 }
 #endif
 
@@ -543,31 +467,26 @@ void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mal
 #if defined(CONFIG_MALI400_PROFILING)
        int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
 #endif
-   val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
-       if (MALI_HW_CORE_NO_COUNTER != counter_src0)
-       {               
+
+       if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+               val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
                mali_pp_job_set_perf_counter_value0(job, subjob, val0);
 
 #if defined(CONFIG_MALI400_PROFILING)
                _mali_osk_profiling_report_hw_counter(counter_index, val0);
+               _mali_osk_profiling_record_global_counters(counter_index, val0);
 #endif
        }
 
-   val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
-       if (MALI_HW_CORE_NO_COUNTER != counter_src1)
-       {               
+       if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+               val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
                mali_pp_job_set_perf_counter_value1(job, subjob, val1);
 
 #if defined(CONFIG_MALI400_PROFILING)
                _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+               _mali_osk_profiling_record_global_counters(counter_index + 1, val1);
 #endif
        }
-       
-       if (MALI_UTILIZATION_BW_CTR_SRC0 == counter_src0
-                       && MALI_UTILIZATION_BW_CTR_SRC1 == counter_src1)
-       {
-               mali_utilization_bw_report_counters(val0, val1);
-       }       
 }
 
 #if MALI_STATE_TRACKING
index 839b43ebbf3597eb87d5abe6a6ab5f3953325257..934fbe5f67c55e618861dc8d73b0c3e865768296 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -14,7 +14,6 @@
 #include "mali_osk.h"
 #include "mali_pp_job.h"
 #include "mali_hw_core.h"
-#include "mali_dma.h"
 
 struct mali_group;
 
@@ -34,7 +33,7 @@ struct mali_pp_core {
 _mali_osk_errcode_t mali_pp_initialize(void);
 void mali_pp_terminate(void);
 
-struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
 void mali_pp_delete(struct mali_pp_core *core);
 
 void mali_pp_stop_bus(struct mali_pp_core *core);
@@ -46,12 +45,6 @@ _mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
 
 void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
 
-/**
- * @brief Add commands to DMA command buffer to start PP job on core.
- */
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
-                                 mali_bool restart_virtual, mali_dma_cmd_buf *buf);
-
 u32 mali_pp_core_get_version(struct mali_pp_core *core);
 
 MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
@@ -66,7 +59,7 @@ MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
        return core->bcast_id;
 }
 
-struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
 u32 mali_pp_get_glob_num_pp_cores(void);
 
 /* Debug */
@@ -86,25 +79,35 @@ u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
  */
 void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
 
-MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
 {
        return core->hw_core.description;
 }
 
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+       u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+                          MALI200_REG_VAL_IRQ_MASK_USED;
+       if (0 == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_NONE;
+       } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+               return MALI_INTERRUPT_RESULT_SUCCESS;
+       }
+       return MALI_INTERRUPT_RESULT_ERROR;
 }
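
mali_pp_get_interrupt_result() above folds the raw interrupt status into a three-way verdict: nothing pending, a clean end-of-frame, or anything else counts as an error. The same decision as a standalone sketch (bit values are illustrative; the real masks live in mali_200_regs.h):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative values, not the hardware's actual bit layout */
    #define IRQ_END_OF_FRAME 0x1u
    #define IRQ_MASK_USED    0x3u /* end-of-frame plus one error bit */

    enum interrupt_result { RESULT_NONE, RESULT_SUCCESS, RESULT_ERROR };

    static enum interrupt_result classify(uint32_t rawstat)
    {
            uint32_t used = rawstat & IRQ_MASK_USED;
            if (used == 0)
                    return RESULT_NONE;
            if (used == IRQ_END_OF_FRAME)
                    return RESULT_SUCCESS;
            return RESULT_ERROR; /* any other bit, alone or combined */
    }

    int main(void)
    {
            printf("%d %d %d\n", classify(0x0), classify(0x1), classify(0x2));
            return 0;
    }
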
 
-MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+       MALI_DEBUG_ASSERT_POINTER(core);
+       return mali_hw_core_register_read(&core->hw_core,
+                                         MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
 }
 
-MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+
+MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core)
 {
-       return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+       u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+       return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
 }
 
 MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
@@ -112,16 +115,19 @@ MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
 }
 
-MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
 {
-       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
 }
 
-MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+               struct mali_pp_job *job, u32 subjob)
 {
-       mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+       u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+       mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
 }
 
+
 MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
 {
        u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
index 8e17ea13b7bdd073c82258c77e74dc7839d8e669..327205702b4a6b45d0a8c5310c31b12da0fd0211 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include "mali_pp.h"
 #include "mali_pp_job.h"
-#include "mali_dma.h"
 #include "mali_osk.h"
 #include "mali_osk_list.h"
 #include "mali_kernel_common.h"
 #include "mali_uk_types.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
 #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
 #include "linux/mali_memory_dma_buf.h"
 #endif
-#include "mali_kernel_utilization.h"
-
-/// static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
-/// static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER;      /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
-static u32 pp_counter_src0 = MALI_UTILIZATION_BW_CTR_SRC0;      /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
-static u32 pp_counter_src1 = MALI_UTILIZATION_BW_CTR_SRC1;      /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
 
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER;   /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
 static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
 static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
 static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
@@ -40,13 +35,15 @@ void mali_pp_job_terminate(void)
        _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
 }
 
-struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+                                      _mali_uk_pp_start_job_s __user *uargs, u32 id)
 {
        struct mali_pp_job *job;
        u32 perf_counter_flag;
 
        job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
        if (NULL != job) {
+               u32 num_memory_cookies = 0;
                if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
                        goto fail;
                }
@@ -64,15 +61,15 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_
                perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
 
                /* case when no counters came from user space
-                * so pass the debugfs / DS-5 provided global ones to the job object */      
+                * so pass the debugfs / DS-5 provided global ones to the job object */
                if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
                      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
                        u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
 
                        /* These counters apply for all virtual jobs, and where no per sub job counter is specified */
                        job->uargs.perf_counter_src0 = pp_counter_src0;
-                       job->uargs.perf_counter_src1 = pp_counter_src1;         
-   
+                       job->uargs.perf_counter_src1 = pp_counter_src1;
+
                        /* We only copy the per sub job array if it is enabled with at least one counter */
                        if (0 < sub_job_count) {
                                job->perf_counter_per_sub_job_count = sub_job_count;
@@ -82,24 +79,22 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_
                }
 
                _mali_osk_list_init(&job->list);
+               _mali_osk_list_init(&job->session_fb_lookup_list);
                job->session = session;
-               _mali_osk_list_init(&job->session_list);
                job->id = id;
 
                job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
                job->pid = _mali_osk_get_pid();
                job->tid = _mali_osk_get_tid();
 
-               job->num_memory_cookies = job->uargs.num_memory_cookies;
-               if (job->num_memory_cookies > 0) {
+               _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+               _mali_osk_atomic_init(&job->sub_job_errors, 0);
+               num_memory_cookies = job->uargs.num_memory_cookies;
+               if (num_memory_cookies != 0) {
                        u32 size;
+                       u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
 
-                       if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
-                               MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
-                               goto fail;
-                       }
-
-                       size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;
+                       size = sizeof(*memory_cookies) * num_memory_cookies;
 
                        job->memory_cookies = _mali_osk_malloc(size);
                        if (NULL == job->memory_cookies) {
@@ -107,35 +102,10 @@ struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_
                                goto fail;
                        }
 
-                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
+                       if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
                                MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
                                goto fail;
                        }
-
-#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-                       job->num_dma_bufs = job->num_memory_cookies;
-                       job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
-                       if (NULL == job->dma_bufs) {
-                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
-                               goto fail;
-                       }
-#endif
-               }
-
-               /* Prepare DMA command buffer to start job, if it is virtual. */
-               if (mali_pp_job_is_virtual(job)) {
-                       struct mali_pp_core *core;
-                       _mali_osk_errcode_t err =  mali_dma_get_cmd_buf(&job->dma_cmd_buf);
-
-                       if (_MALI_OSK_ERR_OK != err) {
-                               MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
-                               goto fail;
-                       }
-
-                       core = mali_pp_scheduler_get_virtual_pp();
-                       MALI_DEBUG_ASSERT_POINTER(core);
-
-                       mali_pp_job_dma_cmd_prepare(core, job, 0, MALI_FALSE, &job->dma_cmd_buf);
                }
 
                if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
@@ -159,25 +129,59 @@ fail:
 
 void mali_pp_job_delete(struct mali_pp_job *job)
 {
-       mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
        if (NULL != job->finished_notification) {
                _mali_osk_notification_delete(job->finished_notification);
        }
 
-       _mali_osk_free(job->memory_cookies);
-
+       if (NULL != job->memory_cookies) {
 #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-       /* Unmap buffers attached to job */
-       if (0 < job->num_dma_bufs) {
+               /* Unmap buffers attached to job */
                mali_dma_buf_unmap_job(job);
+#endif
+               _mali_osk_free(job->memory_cookies);
        }
 
-       _mali_osk_free(job->dma_bufs);
-#endif /* CONFIG_DMA_SHARED_BUFFER */
+       _mali_osk_atomic_term(&job->sub_jobs_completed);
+       _mali_osk_atomic_term(&job->sub_job_errors);
 
        _mali_osk_free(job);
 }
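The reworked creation path above sizes the cookie buffer from a user-supplied count and then copies the array in. The canonical kernel pattern for that kind of copy, with an explicit sanity cap so the size computation cannot overflow, looks roughly like this (a sketch with an assumed cap, not the driver's own helper):

    /* Sketch: copying a user-sized u32 array (cap is an assumption). */
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    #define MAX_COOKIES 4096u /* assumed sanity limit */

    static u32 *copy_cookies(const u32 __user *ucookies, u32 count)
    {
            u32 *cookies;

            if (0 == count || count > MAX_COOKIES)
                    return NULL;

            /* kmalloc_array() fails instead of overflowing count * 4. */
            cookies = kmalloc_array(count, sizeof(*cookies), GFP_KERNEL);
            if (NULL == cookies)
                    return NULL;

            if (0 != copy_from_user(cookies, ucookies,
                                    count * sizeof(*cookies))) {
                    kfree(cookies);
                    return NULL;
            }

            return cookies;
    }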
 
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+       struct mali_pp_job *iter;
+       struct mali_pp_job *tmp;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       /* Find position in list/queue where job should be added. */
+       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+                                           struct mali_pp_job, list) {
+               /* job should be started after iter if iter is in progress. */
+               if (0 < iter->sub_jobs_started) {
+                       break;
+               }
+
+               /*
+                * job should be started after iter if it has a higher
+                * job id. A span is used to handle job id wrapping.
+                */
+               if ((mali_pp_job_get_id(job) -
+                    mali_pp_job_get_id(iter)) <
+                   MALI_SCHEDULER_JOB_ID_SPAN) {
+                       break;
+               }
+       }
+
+       _mali_osk_list_add(&job->list, &iter->list);
+}
+
+
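mali_pp_job_list_add above orders the queue with unsigned wrap-around arithmetic: (job_id - iter_id) is computed modulo 2^32, so a job id that has wrapped past zero still compares as newer, as long as the two ids lie within MALI_SCHEDULER_JOB_ID_SPAN of each other. The comparison in isolation (standalone sketch; the span value here is an assumption, the real constant lives in the scheduler headers):

    /* Sketch: wrap-safe "a was queued after b" test. */
    #include <assert.h>
    #include <stdint.h>

    #define JOB_ID_SPAN 0x7fffffffu /* assumed span */

    static int id_is_after(uint32_t a, uint32_t b)
    {
            /* Unsigned subtraction wraps, so this also holds across the
             * 0xffffffff -> 0 boundary. */
            return (a - b) < JOB_ID_SPAN;
    }

    int main(void)
    {
            assert(id_is_after(10u, 5u));          /* plain case */
            assert(id_is_after(3u, 0xfffffffeu));  /* wrapped case */
            assert(!id_is_after(5u, 10u));
            return 0;
    }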
 u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
 {
        /* Virtual jobs always use the global job counter (or if there are per sub job counters at all) */
@@ -213,20 +217,12 @@ u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
 
 void mali_pp_job_set_pp_counter_global_src0(u32 counter)
 {
-       if (MALI_HW_CORE_NO_COUNTER == counter)
-       {
-               counter = MALI_UTILIZATION_BW_CTR_SRC0;
-       }
-       pp_counter_src0 = counter;   
+       pp_counter_src0 = counter;
 }
 
 void mali_pp_job_set_pp_counter_global_src1(u32 counter)
 {
-       if (MALI_HW_CORE_NO_COUNTER == counter)
-       {
-               counter = MALI_UTILIZATION_BW_CTR_SRC1;
-       }            
-       pp_counter_src1 = counter;      
+       pp_counter_src1 = counter;
 }
 
 void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
@@ -244,10 +240,7 @@ void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
        }
 
        /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
-       if (MALI_HW_CORE_NO_COUNTER == counter)
-       {
-               counter = MALI_UTILIZATION_BW_CTR_SRC0;
-       }
+
        pp_counter_per_sub_job_src0[sub_job] = counter;
 }
 
@@ -266,10 +259,7 @@ void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
        }
 
        /* PS: A change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will inc and dec, result will be 0 change */
-       if (MALI_HW_CORE_NO_COUNTER == counter)
-       {
-               counter = MALI_UTILIZATION_BW_CTR_SRC1;
-       }
+
        pp_counter_per_sub_job_src1[sub_job] = counter;
 }
 
index 7a259924181202d8106349045744fa45b0ffe50a..1050dc6804c449829bf6907f7bad576a61eca023 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_kernel_common.h"
 #include "regs/mali_200_regs.h"
 #include "mali_kernel_core.h"
-#include "mali_dma.h"
 #include "mali_dlbu.h"
 #include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
 #if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
 #include "linux/mali_memory_dma_buf.h"
 #endif
 
 /**
- * The structure represents a PP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the PP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
  */
 struct mali_pp_job {
-       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
-       struct mali_session_data *session;                 /**< Session which submitted this job */
-       _mali_osk_list_t session_list;                     /**< Used to link jobs together in the session job list */
-       _mali_osk_list_t session_fb_lookup_list;           /**< Used to link jobs together from the same frame builder in the session */
+       /*
+        * These members are typically only set at creation,
+        * and only read later on.
+        * They do not require any lock protection.
+        */
        _mali_uk_pp_start_job_s uargs;                     /**< Arguments from user space */
-       mali_dma_cmd_buf dma_cmd_buf;                      /**< Command buffer for starting job using Mali-450 DMA unit */
-       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
-       u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
-       u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
-       u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
-       u32 sub_jobs_num;                                  /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
-       u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
-       u32 sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
-       u32 sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+       struct mali_session_data *session;                 /**< Session which submitted this job */
        u32 pid;                                           /**< Process ID of submitting process */
        u32 tid;                                           /**< Thread ID of submitting thread */
-       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
-       u32 num_memory_cookies;                            /**< Number of memory cookies attached to job */
-       u32 *memory_cookies;                               /**< Memory cookies attached to job */
-#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-       struct mali_dma_buf_attachment **dma_bufs;         /**< Array of DMA-bufs used by job */
-       u32 num_dma_bufs;                                  /**< Number of DMA-bufs used by job */
-#endif
+       u32 id;                                            /**< Identifier for this job in kernel space (sequential numbering) */
+       u32 cache_order;                                   /**< Cache order used for L2 cache flushing (sequential numbering) */
        struct mali_timeline_tracker tracker;              /**< Timeline tracker for this job */
+       _mali_osk_notification_t *finished_notification;   /**< Notification sent back to userspace on job complete */
        u32 perf_counter_per_sub_job_count;                /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
        u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
        u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+       u32 sub_jobs_num;                                  /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+       /*
+        * These members are used by both scheduler and executor.
+        * They are "protected" by atomic operations.
+        */
+       _mali_osk_atomic_t sub_jobs_completed;                            /**< Number of completed sub-jobs in this superjob */
+       _mali_osk_atomic_t sub_job_errors;                                /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+
+       /*
+        * These members are used by the scheduler, but only while no code
+        * other than the working function knows about this job object.
+        * No lock is thus needed for these.
+        */
+       u32 *memory_cookies;                               /**< Memory cookies attached to job */
+
+       /*
+        * These members are used by the scheduler,
+        * protected by the scheduler lock.
+        */
+       _mali_osk_list_t list;                             /**< Used to link jobs together in the scheduler queue */
+       _mali_osk_list_t session_fb_lookup_list;           /**< Used to link jobs together from the same frame builder in the session */
+       u32 sub_jobs_started;                              /**< Total number of sub-jobs started (always started in ascending order) */
+
+       /*
+        * Set by executor/group on job completion, read by scheduler when
+        * returning job to user. Hold executor lock when setting,
+        * no lock needed when reading
+        */
+       u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+       u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS];    /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
 };
 
 void mali_pp_job_initialize(void);
@@ -81,53 +107,71 @@ u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (NULL == job) ? 0 : job->id;
 }
 
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+               u32 cache_order)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       job->cache_order = cache_order;
+}
+
 MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return (NULL == job) ? 0 : job->cache_order;
 }
 
-MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.user_job_ptr;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.frame_builder_id;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.flush_id;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->pid;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->tid;
 }
 
-MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.frame_registers;
 }
 
-MALI_STATIC_INLINE u32* mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.dlbu_registers;
 }
 
 MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
 {
-#if defined(CONFIG_MALI450)
-       return 0 == job->uargs.num_cores;
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
 #else
        return MALI_FALSE;
 #endif
@@ -135,6 +179,8 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+
        if (mali_pp_job_is_virtual(job)) {
                return MALI_DLBU_VIRT_ADDR;
        } else if (0 == sub_job) {
@@ -148,6 +194,8 @@ MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 s
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+
        if (0 == sub_job) {
                return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
        } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
@@ -157,33 +205,81 @@ MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 s
        return 0;
 }
 
-MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+               _mali_osk_list_t *list)
+{
+       _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+               _mali_osk_list_t *list)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+       _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.wb0_registers;
 }
 
-MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.wb1_registers;
 }
 
-MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.wb2_registers;
 }
 
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
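The three write-back accessors above turn a register's byte address into an array index by dividing with sizeof(u32): uargs.wbN_registers mirrors the hardware register file as an array of 32-bit words. The mapping in miniature (standalone sketch; the offset value is an assumption):

    /* Sketch: byte-addressed register file viewed as a u32 array. */
    #include <stdint.h>
    #include <stdio.h>

    #define REG_WB_SOURCE_ADDR 0x08u /* assumed byte offset */

    int main(void)
    {
            uint32_t wb_registers[16] = { 0 };

            /* Byte offset 0x08 lands on word index 2. */
            wb_registers[REG_WB_SOURCE_ADDR / sizeof(uint32_t)] = 0x1000u;

            printf("word index %u holds 0x%08x\n",
                   (unsigned)(REG_WB_SOURCE_ADDR / sizeof(uint32_t)),
                   wb_registers[REG_WB_SOURCE_ADDR / sizeof(uint32_t)]);
            return 0;
    }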
+
 MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
 }
 
 MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
 }
 
 MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
 }
 
@@ -191,9 +287,9 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali
 {
        MALI_DEBUG_ASSERT_POINTER(job);
 
-       if ( job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
-            job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
-            job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+       if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+           job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+           job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
           ) {
                /* At least one output unit active */
                return MALI_FALSE;
@@ -203,20 +299,45 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali
        return MALI_TRUE;
 }
 
-MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
 {
+       u32 fb_lookup_id;
+
        MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
 
-       return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+       MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+
+       _mali_osk_list_addtail(&job->session_fb_lookup_list,
+                              &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       _mali_osk_list_delinit(&job->session_fb_lookup_list);
 }
 
 MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->session;
 }
 
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
 MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
        return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
 }
 
@@ -224,39 +345,71 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_j
    Makes sure that no new subjobs are started. */
 MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
 {
-       u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
-       job->sub_jobs_started   += jobs_remaining;
-       job->sub_jobs_completed += jobs_remaining;
-       job->sub_job_errors     += jobs_remaining;
-}
+       u32 jobs_remaining;
+       u32 i;
 
-MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
-{
-       u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
-       job->sub_jobs_started   += jobs_remaining;
-       job->sub_jobs_completed += jobs_remaining;
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+       jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+       job->sub_jobs_started += jobs_remaining;
+
+       /* Not the most efficient way, but this is only used in error cases */
+       for (i = 0; i < jobs_remaining; i++) {
+               _mali_osk_atomic_inc(&job->sub_jobs_completed);
+               _mali_osk_atomic_inc(&job->sub_job_errors);
+       }
 }
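As the comment above says, looping _mali_osk_atomic_inc once per remaining sub-job favours simplicity over speed in an error-only path. With the plain kernel atomics the same bulk update collapses to one add per counter; a sketch using atomic_t directly (the _mali_osk wrapper layer may not expose an add, so this is not a drop-in replacement):

    /* Sketch: bulk completion using a single atomic add per counter. */
    #include <linux/atomic.h>
    #include <linux/types.h>

    static void bulk_mark_failed(atomic_t *completed, atomic_t *errors,
                                 u32 jobs_remaining)
    {
            atomic_add(jobs_remaining, completed);
            atomic_add(jobs_remaining, errors);
    }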
 
 MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
 {
-       return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->sub_jobs_num ==
+               _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+              MALI_TRUE : MALI_FALSE;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
        return job->sub_jobs_started;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->sub_jobs_num;
 }
 
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+       return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+       struct mali_pp_job *job, u32 index)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+       MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+       return job->memory_cookies[index];
+}
+
 MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
 {
-       MALI_DEBUG_ASSERT(job);
+       MALI_DEBUG_ASSERT_POINTER(job);
 
-       if (0 != job->num_memory_cookies) {
+       if (0 < job->uargs.num_memory_cookies) {
                return MALI_TRUE;
        }
 
@@ -266,6 +419,7 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_jo
 MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
 {
        MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
 
        /* Assert that we are marking the "first unstarted sub job" as started */
        MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
@@ -275,86 +429,103 @@ MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job
 
 MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
 {
-       job->sub_jobs_completed++;
-       if ( MALI_FALSE == success ) {
-               job->sub_job_errors++;
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_atomic_inc(&job->sub_jobs_completed);
+       if (MALI_FALSE == success) {
+               _mali_osk_atomic_inc(&job->sub_job_errors);
        }
 }
 
 MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
 {
-       if ( 0 == job->sub_job_errors ) {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) {
                return MALI_TRUE;
        }
        return MALI_FALSE;
 }
 
-MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+       struct mali_pp_job *job)
 {
-       return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+              MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+       /*
+        * A pilot job is currently identified as a job which
+        * requires no callback notification.
+        */
+       return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+       notification = job->finished_notification;
+       job->finished_notification = NULL;
+
+       return notification;
+}
+
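mali_pp_job_get_finished_notification transfers ownership by clearing the job's pointer as it returns it, which is what lets mali_pp_job_delete free a still-attached notification unconditionally without double-freeing one that was already sent. The take-and-clear idiom in isolation (sketch with a hypothetical resource type):

    /* Sketch: ownership hand-over by take-and-clear (hypothetical type). */
    struct resource;

    struct holder {
            struct resource *res;
    };

    static struct resource *holder_take_resource(struct holder *h)
    {
            struct resource *res = h->res;

            /* The holder forgets the resource, so its tear-down path can
             * simply free whatever is still non-NULL. */
            h->res = NULL;

            return res;
    }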
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+       struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+              ? MALI_TRUE : MALI_FALSE;
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->uargs.perf_counter_flag;
 }
 
-
 MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->perf_counter_value0[sub_job];
 }
 
 MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        return job->perf_counter_value1[sub_job];
 }
 
 MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        job->perf_counter_value0[sub_job] = value;
 }
 
 MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
        job->perf_counter_value1[sub_job] = value;
 }
 
 MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
 {
+       MALI_DEBUG_ASSERT_POINTER(job);
        if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
                return _MALI_OSK_ERR_FAULT;
        }
        return _MALI_OSK_ERR_OK;
 }
 
-/**
- * Returns MALI_TRUE if first job should be started after second job.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
-{
-       MALI_DEBUG_ASSERT_POINTER(first);
-       MALI_DEBUG_ASSERT_POINTER(second);
-
-       /* First job should be started after second job if second job is in progress. */
-       if (0 < second->sub_jobs_started) {
-               return MALI_TRUE;
-       }
-
-       /* First job should be started after second job if first job has a higher job id.  A span is
-          used to handle job id wrapping. */
-       if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
-               return MALI_TRUE;
-       }
-
-       /* Second job should be started after first job. */
-       return MALI_FALSE;
-}
-
 /**
  * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
  *
@@ -364,6 +535,7 @@ MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *
 MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
 {
        MALI_DEBUG_ASSERT_POINTER(job);
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
        MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
 
        return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
@@ -381,4 +553,12 @@ MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct
        return &(job->tracker);
 }
 
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+       struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+       return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
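The double cast in mali_pp_job_get_timeline_point_ptr exists because timeline_point_ptr travels in the uargs block as a u64, giving 32-bit user space and 64-bit kernels a single ABI; going through uintptr_t first avoids the integer-to-pointer-of-different-size warning. A condensed sketch of the round trip (field name assumed):

    /* Sketch: a user pointer carried through a fixed-width u64 field. */
    #include <stdint.h>

    struct uk_args {
            uint64_t user_ptr; /* wide enough for any pointer size */
    };

    static uint32_t *args_get_user_ptr(struct uk_args *args)
    {
            /* u64 -> uintptr_t -> pointer is well defined and silent on
             * both 32-bit and 64-bit builds. */
            return (uint32_t *)(uintptr_t)args->user_ptr;
    }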
 #endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.c
deleted file mode 100644 (file)
index 6b985d0..0000000
+++ /dev/null
@@ -1,2067 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#include "mali_pp_scheduler.h"
-#include "mali_kernel_common.h"
-#include "mali_kernel_core.h"
-#include "mali_osk.h"
-#include "mali_osk_list.h"
-#include "mali_scheduler.h"
-#include "mali_pp.h"
-#include "mali_pp_job.h"
-#include "mali_group.h"
-#include "mali_pm.h"
-#include "mali_timeline.h"
-#include "mali_osk_profiling.h"
-#include "mali_kernel_utilization.h"
-#include "mali_session.h"
-#include "mali_pm_domain.h"
-#include "linux/mali/mali_utgard.h"
-
-#if defined(CONFIG_DMA_SHARED_BUFFER)
-#include "mali_memory_dma_buf.h"
-#endif
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-#include <linux/sched.h>
-#include <trace/events/gpu.h>
-#endif
-
-/* Queue type used for physical and virtual job queues. */
-struct mali_pp_scheduler_job_queue {
-       _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
-       _MALI_OSK_LIST_HEAD(high_pri);   /* List of high priority jobs with some unscheduled work. */
-       u32 depth;                       /* Depth of combined queues. */
-};
-
-/* If dma_buf with map on demand is used, we defer job deletion and job queue if in atomic context,
- * since both might sleep. */
-#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
-#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
-#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
-
-static void mali_pp_scheduler_job_queued(void);
-static void mali_pp_scheduler_job_completed(void);
-
-/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
-#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
-
-static mali_bool mali_pp_scheduler_is_suspended(void *data);
-
-static u32 pp_version = 0;
-
-/* Physical job queue */
-static struct mali_pp_scheduler_job_queue job_queue;
-
-/* Physical groups */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);     /* List of physical groups with working jobs on the pp core */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);        /* List of physical groups with idle jobs on the pp core */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);    /* List of disabled physical groups */
-
-/* Virtual job queue (Mali-450 only) */
-static struct mali_pp_scheduler_job_queue virtual_job_queue;
-
-/**
- * Add job to scheduler queue.
- *
- * @param job Job to queue.
- * @return Schedule mask.
- */
-static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
-
-/* Virtual group (Mali-450 only) */
-static struct mali_group *virtual_group = NULL;                 /* Virtual group (if any) */
-static enum {
-       VIRTUAL_GROUP_IDLE,
-       VIRTUAL_GROUP_WORKING,
-       VIRTUAL_GROUP_DISABLED,
-}
-virtual_group_state = VIRTUAL_GROUP_IDLE;            /* Flag which indicates whether the virtual group is working or idle */
-
-/* Number of physical cores */
-static u32 num_cores = 0;
-
-/* Number of physical cores which are enabled */
-static u32 enabled_cores = 0;
-
-/* Enable or disable core scaling */
-static mali_bool core_scaling_enabled = MALI_TRUE;
-
-/* Variables to allow safe pausing of the scheduler */
-static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
-static u32 pause_count = 0;
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
-#else
-static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-
-MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
-{
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
-#else
-       _mali_osk_spinlock_lock(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
-{
-       MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
-#else
-       _mali_osk_spinlock_unlock(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-#if defined(DEBUG)
-#define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
-#else
-#define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
-#endif /* defined(DEBUG) */
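The removed lock wrappers above selected an IRQ-disabling spinlock only when MALI_UPPER_HALF_SCHEDULING was set, i.e. when the scheduler lock could be taken from interrupt context; otherwise the cheaper non-IRQ variant was enough. The same trade-off with stock kernel primitives (sketch):

    /* Sketch: IRQ-safe locking, mirroring the MALI_UPPER_HALF case. */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(sched_lock);

    static void sched_critical_section(void)
    {
            unsigned long flags;

            /* spin_lock_irqsave() is required if any user of the lock can
             * run in interrupt context; plain spin_lock() suffices when
             * everything runs in process context. */
            spin_lock_irqsave(&sched_lock, flags);
            /* ... touch scheduler state ... */
            spin_unlock_irqrestore(&sched_lock, flags);
    }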
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-
-static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
-static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
-
-static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
-{
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
-
-       /* This job object should not be on any lists. */
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
-       _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
-
-       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
-
-       _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
-}
-
-static void mali_pp_scheduler_do_job_delete(void *arg)
-{
-       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
-       struct mali_pp_job *job;
-       struct mali_pp_job *tmp;
-
-       MALI_IGNORE(arg);
-
-       _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
-
-       /*
-        * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
-        * we start deleting the job objects (without any locks held
-        */
-       _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
-
-       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
-
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
-               mali_pp_job_delete(job); /* delete the job object itself */
-       }
-}
-
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
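The deferred-delete machinery being removed here follows a common kernel pattern that survives into the replacement code: queue objects on a private list under a spinlock, splice the whole list out under the same lock, then free the objects from workqueue context with no locks held (job deletion may sleep). A minimal standalone version with stock primitives (hypothetical job type):

    /* Sketch: deferred free via list splice + workqueue (assumed types). */
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct dead_job {
            struct list_head list;
    };

    static LIST_HEAD(deletion_queue);
    static DEFINE_SPINLOCK(deletion_lock);

    static void do_deferred_delete(struct work_struct *work)
    {
            LIST_HEAD(local);
            struct dead_job *job, *tmp;

            /* Unhook quickly, then free with no locks held. */
            spin_lock(&deletion_lock);
            list_splice_init(&deletion_queue, &local);
            spin_unlock(&deletion_lock);

            list_for_each_entry_safe(job, tmp, &local, list)
                    kfree(job);
    }
    static DECLARE_WORK(deletion_work, do_deferred_delete);

    static void defer_delete(struct dead_job *job)
    {
            spin_lock(&deletion_lock);
            list_add_tail(&job->list, &deletion_queue);
            spin_unlock(&deletion_lock);

            schedule_work(&deletion_work);
    }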
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-
-static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
-static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
-
-static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
-{
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
-       _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
-       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
-
-       _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
-}
-
-static void mali_pp_scheduler_do_job_queue(void *arg)
-{
-       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
-       struct mali_pp_job *job;
-       struct mali_pp_job *tmp;
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
-       MALI_IGNORE(arg);
-
-       _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
-
-       /*
-        * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
-        * we start queueing the job objects (without any locks held)
-        */
-       _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
-
-       _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
-
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
-               _mali_osk_list_delinit(&job->list);
-               schedule_mask |= mali_pp_scheduler_queue_job(job);
-       }
-
-       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
-}
-
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
-{
-#if defined(CONFIG_MALI450)
-       return NULL != virtual_group;
-#else
-       return MALI_FALSE;
-#endif /* defined(CONFIG_MALI450) */
-}
-
-_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
-{
-       _MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
-       _MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
-       job_queue.depth = 0;
-
-       _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.normal_pri);
-       _MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue.high_pri);
-       virtual_job_queue.depth = 0;
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#else
-       pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-       if (NULL == pp_scheduler_lock) goto cleanup;
-
-       pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
-       if (NULL == pp_scheduler_working_wait_queue) goto cleanup;
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-       pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
-       if (NULL == pp_scheduler_wq_job_delete) goto cleanup;
-
-       pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
-       if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-       pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
-       if (NULL == pp_scheduler_wq_job_queue) goto cleanup;
-
-       pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
-       if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-       return _MALI_OSK_ERR_OK;
-
-cleanup:
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-       if (NULL != pp_scheduler_job_queue_lock) {
-               _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
-               pp_scheduler_job_queue_lock = NULL;
-       }
-
-       if (NULL != pp_scheduler_wq_job_queue) {
-               _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
-               pp_scheduler_wq_job_queue = NULL;
-       }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-       if (NULL != pp_scheduler_job_delete_lock) {
-               _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
-               pp_scheduler_job_delete_lock = NULL;
-       }
-
-       if (NULL != pp_scheduler_wq_job_delete) {
-               _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
-               pp_scheduler_wq_job_delete = NULL;
-       }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
-       if (NULL != pp_scheduler_working_wait_queue) {
-               _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
-               pp_scheduler_working_wait_queue = NULL;
-       }
-
-       if (NULL != pp_scheduler_lock) {
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-               _mali_osk_spinlock_irq_term(pp_scheduler_lock);
-#else
-               _mali_osk_spinlock_term(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-               pp_scheduler_lock = NULL;
-       }
-
-       return _MALI_OSK_ERR_NOMEM;
-}
-
-void mali_pp_scheduler_terminate(void)
-{
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-       _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
-       _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-       _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
-       _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
-       _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-       _mali_osk_spinlock_irq_term(pp_scheduler_lock);
-#else
-       _mali_osk_spinlock_term(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-void mali_pp_scheduler_populate(void)
-{
-       struct mali_group *group;
-       struct mali_pp_core *pp_core;
-       u32 num_groups;
-       u32 i;
-
-       num_groups = mali_group_get_glob_num_groups();
-
-       /* Do we have a virtual group? */
-       for (i = 0; i < num_groups; i++) {
-               group = mali_group_get_glob_group(i);
-
-               if (mali_group_is_virtual(group)) {
-                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));
-
-                       virtual_group = group;
-                       break;
-               }
-       }
-
-       /* Find all the available PP cores */
-       for (i = 0; i < num_groups; i++) {
-               group = mali_group_get_glob_group(i);
-               pp_core = mali_group_get_pp_core(group);
-
-               if (NULL != pp_core && !mali_group_is_virtual(group)) {
-                       if (0 == pp_version) {
-                               /* Retrieve PP version from the first available PP core */
-                               pp_version = mali_pp_core_get_version(pp_core);
-                       }
-
-                       if (mali_pp_scheduler_has_virtual_group()) {
-                               /* Add all physical PP cores to the virtual group */
-                               mali_group_lock(virtual_group);
-                               group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
-                               mali_group_add_group(virtual_group, group, MALI_TRUE);
-                               mali_group_unlock(virtual_group);
-                       } else {
-                               _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
-                       }
-
-                       num_cores++;
-               }
-       }
-
-       enabled_cores = num_cores;
-}
-
-void mali_pp_scheduler_depopulate(void)
-{
-       struct mali_group *group, *temp;
-
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
-       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
-
-       /* Delete all groups owned by scheduler */
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_group_delete(virtual_group);
-       }
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
-               mali_group_delete(group);
-       }
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
-               mali_group_delete(group);
-       }
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
-{
-       MALI_ASSERT_GROUP_LOCKED(virtual_group);
-
-       if (mali_group_virtual_disable_if_empty(virtual_group)) {
-               MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
-
-               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
-               virtual_group_state = VIRTUAL_GROUP_DISABLED;
-       }
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
-{
-       MALI_ASSERT_GROUP_LOCKED(virtual_group);
-
-       if (mali_group_virtual_enable_if_empty(virtual_group)) {
-               MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
-
-               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
-
-               virtual_group_state = VIRTUAL_GROUP_IDLE;
-       }
-}
-
-static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
-{
-       struct mali_pp_job *job = NULL;
-
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       MALI_DEBUG_ASSERT_POINTER(queue);
-
-       /* Check if we have a normal priority job. */
-       if (!_mali_osk_list_empty(&queue->normal_pri)) {
-               MALI_DEBUG_ASSERT(queue->depth > 0);
-               job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
-       }
-
-       /* Prefer normal priority job if it is in progress. */
-       if (NULL != job && 0 < job->sub_jobs_started) {
-               return job;
-       }
-
-       /* Check if we have a high priority job. */
-       if (!_mali_osk_list_empty(&queue->high_pri)) {
-               MALI_DEBUG_ASSERT(queue->depth > 0);
-               job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
-       }
-
-       return job;
-}
-
-/**
- * Returns a physical job if a physical job is ready to run
- */
-MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       return mali_pp_scheduler_get_job(&job_queue);
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       MALI_DEBUG_ASSERT(job_queue.depth > 0);
-
-       /* Remove job from queue */
-       if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
-               /* All sub jobs have been started: remove job from queue */
-               _mali_osk_list_delinit(&job->list);
-               _mali_osk_list_delinit(&job->session_fb_lookup_list);
-       }
-
-       --job_queue.depth;
-}
-
-/**
- * Returns a virtual job if a virtual job is ready to run
- */
-MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_job(void)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       MALI_DEBUG_ASSERT_POINTER(virtual_group);
-       return mali_pp_scheduler_get_job(&virtual_job_queue);
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_virtual_job(struct mali_pp_job *job)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       MALI_DEBUG_ASSERT(virtual_job_queue.depth > 0);
-
-       /* Remove job from queue */
-       _mali_osk_list_delinit(&job->list);
-       _mali_osk_list_delinit(&job->session_fb_lookup_list);
-       --virtual_job_queue.depth;
-}
-
-/**
- * Checks if the criteria is met for removing a physical core from virtual group
- */
-MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-       MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
-       MALI_ASSERT_GROUP_LOCKED(virtual_group);
-       /*
-        * The criteria for taking out a physical group from a virtual group are the following:
-        * - The virtual group is idle
-        * - There are currently no physical groups (neither idle nor working)
-        * - There are physical jobs to be scheduled
-        */
-       return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
-              _mali_osk_list_empty(&group_list_idle) &&
-              _mali_osk_list_empty(&group_list_working) &&
-              (NULL != mali_pp_scheduler_get_physical_job());
-}
-
-MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
-{
-       MALI_ASSERT_PP_SCHEDULER_LOCKED();
-
-       if (!_mali_osk_list_empty(&group_list_idle)) {
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
-               return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
-       } else if (mali_pp_scheduler_has_virtual_group()) {
-               MALI_ASSERT_GROUP_LOCKED(virtual_group);
-               if (mali_pp_scheduler_can_move_virtual_to_physical()) {
-                       struct mali_group *group;
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
-                       group = mali_group_acquire_group(virtual_group);
-
-                       if (mali_pp_scheduler_has_virtual_group()) {
-                               mali_pp_scheduler_disable_empty_virtual();
-                       }
-
-                       return group;
-               }
-       }
-
-       return NULL;
-}
-
-static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
-{
-       if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
-               u32 i;
-               u32 num_counters_to_copy;
-               mali_bool success = mali_pp_job_was_success(job);
-
-               _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
-               _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
-               jobres->user_job_ptr = mali_pp_job_get_user_id(job);
-               if (MALI_TRUE == success) {
-                       jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
-               } else {
-                       jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
-               }
-
-               if (mali_pp_job_is_virtual(job)) {
-                       num_counters_to_copy = num_cores; /* Number of physical cores available */
-               } else {
-                       num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
-               }
-
-               for (i = 0; i < num_counters_to_copy; i++) {
-                       jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
-                       jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
-                       jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
-                       jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
-               }
-
-               mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
-               job->finished_notification = NULL;
-       }
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-       if (MALI_TRUE == deferred) {
-               /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
-               mali_pp_scheduler_deferred_job_delete(job);
-       } else {
-               mali_pp_job_delete(job);
-       }
-#else
-       MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
-       mali_pp_job_delete(job);
-#endif
-}
-
-static void mali_pp_scheduler_finalize_job(struct mali_pp_job * job)
-{
-       /* This job object should not be on any lists. */
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
-       /* Send notification back to user space */
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-       mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
-#else
-       mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
-#endif
-
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
-               _mali_osk_atomic_inc(&job->session->number_of_window_jobs);
-       }
-#endif
-
-       mali_pp_scheduler_job_completed();
-}
-
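-/**
- * Main scheduling entry point.
- *
- * Starts as many queued physical sub jobs as there are physical groups
- * available, and a virtual job if the virtual group is idle.  While the
- * scheduler lock is held the jobs are only dequeued and recorded; the (slow)
- * register writes that actually start the physical jobs are done after all
- * locks have been released.
- */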
-void mali_pp_scheduler_schedule(void)
-{
-       struct mali_group* physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
-       struct mali_pp_job* physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
-       u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
-       int num_physical_jobs_to_start = 0;
-       int i;
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               /* Lock the virtual group since we might have to grab physical groups. */
-               mali_group_lock(virtual_group);
-       }
-
-       mali_pp_scheduler_lock();
-       if (pause_count > 0) {
-               /* Scheduler is suspended, don't schedule any jobs. */
-               mali_pp_scheduler_unlock();
-               if (mali_pp_scheduler_has_virtual_group()) {
-                       mali_group_unlock(virtual_group);
-               }
-               return;
-       }
-
-       /* Find physical job(s) to schedule first. */
-       while (1) {
-               struct mali_group *group;
-               struct mali_pp_job *job;
-               u32 sub_job;
-
-               job = mali_pp_scheduler_get_physical_job();
-               if (NULL == job) {
-                       break; /* No job, early out. */
-               }
-
-               if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
-                   mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
-                       /* Since not all groups are idle, don't schedule yet. */
-                       break;
-               }
-
-               MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
-               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
-               MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
-
-               /* Acquire a physical group, either from the idle list or from the virtual group.
-                * In case the group was acquired from the virtual group, its state will be
-                * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
-               group = mali_pp_scheduler_acquire_physical_group();
-               if (NULL == group) {
-                       /* Could not get a group to run the job on, early out. */
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
-                       break;
-               }
-
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));
-
-               /* Mark sub job as started. */
-               sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
-               mali_pp_job_mark_sub_job_started(job, sub_job);
-
-               /* Remove job from queue (if this was the last sub job). */
-               mali_pp_scheduler_dequeue_physical_job(job);
-
-               /* Move group to working list. */
-               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
-
-               /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
-               physical_groups_to_start[num_physical_jobs_to_start] = group;
-               physical_jobs_to_start[num_physical_jobs_to_start] = job;
-               physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
-               ++num_physical_jobs_to_start;
-
-               MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
-       }
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
-                       /* We have a virtual group and it is idle. */
-
-                       struct mali_pp_job *job;
-
-                       /* Find a virtual job we can start. */
-                       job = mali_pp_scheduler_get_virtual_job();
-
-                       if (NULL != job) {
-                               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
-                               MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
-                               MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
-
-                               /* Mark the one and only sub job as started. */
-                               mali_pp_job_mark_sub_job_started(job, 0);
-
-                               /* Remove job from queue. */
-                               mali_pp_scheduler_dequeue_virtual_job(job);
-
-                               /* Virtual group is now working. */
-                               virtual_group_state = VIRTUAL_GROUP_WORKING;
-
-                               /* We no longer need the scheduler lock, but we still need the virtual lock
-                                * in order to start the virtual job. */
-                               mali_pp_scheduler_unlock();
-
-                               /* Start job. */
-                               mali_group_start_pp_job(virtual_group, job, 0);
-
-                               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
-                                                    mali_pp_job_get_id(job), job, 1,
-                                                    mali_pp_job_get_sub_job_count(job)));
-
-                               mali_group_unlock(virtual_group);
-                       } else {
-                               /* No virtual job to start. */
-                               mali_pp_scheduler_unlock();
-                               mali_group_unlock(virtual_group);
-                       }
-               } else {
-                       /* We have a virtual group, but it is busy or disabled. */
-                       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);
-
-                       mali_pp_scheduler_unlock();
-                       mali_group_unlock(virtual_group);
-               }
-       } else {
-               /* There is no virtual group. */
-               mali_pp_scheduler_unlock();
-       }
-
-       /* We have now released the scheduler lock, and we are ready to start the physical jobs.
-        * The reason we want to wait until we have released the scheduler lock is that job start
-        * may take quite a bit of time (many registers have to be written). This will allow new
-        * jobs from user space to come in, and post-processing of other PP jobs to happen at the
-        * same time as we start jobs. */
-       for (i = 0; i < num_physical_jobs_to_start; i++) {
-               struct mali_group *group = physical_groups_to_start[i];
-               struct mali_pp_job *job  = physical_jobs_to_start[i];
-               u32 sub_job              = physical_sub_jobs_to_start[i];
-
-               MALI_DEBUG_ASSERT_POINTER(group);
-               MALI_DEBUG_ASSERT_POINTER(job);
-               MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
-               MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
-
-               mali_group_lock(group);
-
-               /* Set state to IDLE if group was acquired from the virtual group. */
-               group->state = MALI_GROUP_STATE_IDLE;
-
-               mali_group_start_pp_job(group, job, sub_job);
-
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
-                                    mali_pp_job_get_id(job), job, sub_job + 1,
-                                    mali_pp_job_get_sub_job_count(job)));
-
-               mali_group_unlock(group);
-       }
-}
-
-/**
- * Set group idle.
- *
- * If @ref group is the virtual group, nothing is done since the virtual group should be idle
- * already.
- *
- * If @ref group is a physical group we rejoin the virtual group, if it exists.  If not, we move the
- * physical group to the idle list.
- *
- * @note The group and the scheduler must both be locked when entering this function.  Both will be
- * unlocked before exiting.
- *
- * @param group The group to set idle.
- */
-static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
-       if (mali_group_is_virtual(group)) {
-               /* The virtual group should have been set to non-working already. */
-               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
-               mali_pp_scheduler_unlock();
-               mali_group_unlock(group);
-
-               return;
-       } else {
-               if (mali_pp_scheduler_has_virtual_group()) {
-                       /* Rejoin virtual group. */
-
-                       /* We're no longer needed on the scheduler list. */
-                       _mali_osk_list_delinit(&(group->pp_scheduler_list));
-
-                       /* Make sure no interrupts are handled for this group during the transition
-                        * from physical to virtual. */
-                       group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
-
-                       mali_pp_scheduler_unlock();
-                       mali_group_unlock(group);
-
-                       mali_group_lock(virtual_group);
-
-                       if (mali_pp_scheduler_has_virtual_group()) {
-                               mali_pp_scheduler_enable_empty_virtual();
-                       }
-
-                       /* We need to recheck the group state since it is possible that someone has
-                        * modified the group before we locked the virtual group. */
-                       if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
-                               mali_group_add_group(virtual_group, group, MALI_TRUE);
-                       }
-
-                       mali_group_unlock(virtual_group);
-               } else {
-                       /* Move physical group back to idle list. */
-                       _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-                       trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
-#endif
-
-                       mali_pp_scheduler_unlock();
-                       mali_group_unlock(group);
-               }
-       }
-}
-
-/**
- * Schedule job on locked group.
- *
- * @note The group and the scheduler must both be locked when entering this function.  Both will be
- * unlocked before exiting.
- *
- * @param group The group to schedule on.
- */
-static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       MALI_ASSERT_GROUP_LOCKED(group);
-       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
-       if (mali_group_is_virtual(group)) {
-               /* Now that the virtual group is idle, check if we should reconfigure. */
-
-               struct mali_pp_job *virtual_job = NULL;
-               struct mali_pp_job *physical_job = NULL;
-               struct mali_group *physical_group = NULL;
-               u32 physical_sub_job = 0;
-
-               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
-               if (mali_pp_scheduler_can_move_virtual_to_physical()) {
-                       /* There is a runnable physical job and we can acquire a physical group. */
-                       physical_job = mali_pp_scheduler_get_physical_job();
-                       MALI_DEBUG_ASSERT_POINTER(physical_job);
-                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
-
-                       /* Mark sub job as started. */
-                       physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
-                       mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);
-
-                       /* Remove job from queue (if this was the last sub job). */
-                       mali_pp_scheduler_dequeue_physical_job(physical_job);
-
-                       /* Acquire a physical group from the virtual group.  Its state will
-                        * be LEAVING_VIRTUAL and must be set to IDLE before it can be
-                        * used. */
-                       physical_group = mali_group_acquire_group(virtual_group);
-
-                       /* Move physical group to the working list, as we will soon start a job on it. */
-                       _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
-
-                       mali_pp_scheduler_disable_empty_virtual();
-               }
-
-               /* Get next virtual job. */
-               virtual_job = mali_pp_scheduler_get_virtual_job();
-               if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
-                       /* There is a runnable virtual job. */
-
-                       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(virtual_job));
-                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(virtual_job));
-                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(virtual_job));
-
-                       mali_pp_job_mark_sub_job_started(virtual_job, 0);
-
-                       /* Remove job from queue. */
-                       mali_pp_scheduler_dequeue_virtual_job(virtual_job);
-
-                       /* Virtual group is now working. */
-                       virtual_group_state = VIRTUAL_GROUP_WORKING;
-
-                       mali_pp_scheduler_unlock();
-
-                       /* Start job. */
-                       mali_group_start_pp_job(group, virtual_job, 0);
-
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
-                                            mali_pp_job_get_id(virtual_job), virtual_job, 1,
-                                            mali_pp_job_get_sub_job_count(virtual_job)));
-               } else {
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-                       trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
-#endif
-
-                       mali_pp_scheduler_unlock();
-               }
-
-               /* Releasing the virtual group lock that was held when entering the function. */
-               mali_group_unlock(group);
-
-               /* Start a physical job (if we acquired a physical group earlier). */
-               if (NULL != physical_job && NULL != physical_group) {
-                       mali_group_lock(physical_group);
-
-                       /* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
-                       physical_group->state = MALI_GROUP_STATE_IDLE;
-
-                       /* Start job. */
-                       mali_group_start_pp_job(physical_group, physical_job, physical_sub_job);
-
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
-                                            mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
-                                            mali_pp_job_get_sub_job_count(physical_job)));
-
-                       mali_group_unlock(physical_group);
-               }
-       } else {
-               /* Physical group. */
-               struct mali_pp_job *job = NULL;
-               u32 sub_job = 0;
-
-               job = mali_pp_scheduler_get_physical_job();
-               if (NULL != job) {
-                       /* There is a runnable physical job. */
-                       MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
-
-                       /* Mark sub job as started. */
-                       sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
-                       mali_pp_job_mark_sub_job_started(job, sub_job);
-
-                       /* Remove job from queue (if this was the last sub job). */
-                       mali_pp_scheduler_dequeue_physical_job(job);
-
-                       mali_pp_scheduler_unlock();
-
-                       /* Group is already on the working list, so start the new job. */
-                       mali_group_start_pp_job(group, job, sub_job);
-
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
-                                            mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
-
-                       mali_group_unlock(group);
-               } else {
-                       mali_pp_scheduler_set_group_idle_and_unlock(group);
-               }
-       }
-}
-
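-/**
- * Handle completion of a (sub) job on @ref group.
- *
- * Marks the sub job as completed, releases the job's Timeline tracker once
- * all parts are done, and then tries to schedule new work on the now idle
- * group.  Finally the finished job, if any, is returned to user space.
- *
- * @note The group must be locked when entering this function.  It will be
- * unlocked before exiting.
- */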
-void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
-{
-       mali_bool job_is_done = MALI_FALSE;
-       mali_bool schedule_on_group = MALI_FALSE;
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
-       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
-                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
-                            mali_pp_job_get_id(job),
-                            job, sub_job + 1,
-                            mali_pp_job_get_sub_job_count(job),
-                            success ? "success" : "failure"));
-
-       MALI_ASSERT_GROUP_LOCKED(group);
-       mali_pp_scheduler_lock();
-
-       mali_pp_job_mark_sub_job_completed(job, success);
-
-       MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job) == mali_group_is_virtual(group));
-
-       job_is_done = mali_pp_job_is_complete(job);
-
-       if (job_is_done) {
-               /* Job is removed from these lists when the last sub job is scheduled. */
-               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
-               MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
-               /* Remove job from session list. */
-               _mali_osk_list_delinit(&job->session_list);
-
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
-                                    mali_pp_job_is_virtual(job) ? "virtual" : "physical",
-                                    mali_pp_job_get_id(job), job));
-
-               mali_pp_scheduler_unlock();
-
-               /* Release tracker.  If other trackers are waiting on this tracker, this could
-                * trigger activation.  The returned scheduling mask can be used to determine if we
-                * have to schedule GP, PP or both. */
-               schedule_mask = mali_timeline_tracker_release(&job->tracker);
-
-               mali_pp_scheduler_lock();
-       }
-
-       if (mali_group_is_virtual(group)) {
-               /* The virtual group is no longer working. */
-               virtual_group_state = VIRTUAL_GROUP_IDLE;
-       }
-
-       /* If paused, then this was the last job, so wake up sleeping workers and return. */
-       if (pause_count > 0) {
-               /* Wake up sleeping workers. Their wake-up condition is that no
-                * group is still working, so unless we are done working, no
-                * threads will actually be woken up.
-                */
-               if (!mali_group_is_virtual(group)) {
-                       /* Move physical group to idle list. */
-                       _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
-               }
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-               trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
-#endif
-
-               _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
-
-               mali_pp_scheduler_unlock();
-               mali_group_unlock(group);
-
-               if (job_is_done) {
-                       /* Return job to user and delete it. */
-                       mali_pp_scheduler_finalize_job(job);
-               }
-
-               /* A GP job might have been activated by the tracker release above;
-                * make sure the GP scheduler gets a chance to schedule it (if possible).
-                */
-               mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);
-
-               return;
-       }
-
-       /* Since this group just finished running a job, we can reschedule a new job on it
-        * immediately. */
-
-       /* By default, don't schedule on group. */
-       schedule_on_group = MALI_FALSE;
-
-       if (mali_group_is_virtual(group)) {
-               /* Always schedule immediately on virtual group. */
-               schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
-               schedule_on_group = MALI_TRUE;
-       } else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
-               struct mali_pp_job *next_job = NULL;
-
-               next_job = mali_pp_scheduler_get_physical_job();
-               MALI_DEBUG_ASSERT_POINTER(next_job);
-
-               /* If no new jobs have been queued or if this group is the only idle group, we can
-                * schedule immediately on this group, unless we are GP bound and the next job would
-                * benefit from all its sub jobs being started concurrently. */
-
-               if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
-                       /* We are GP bound and the job would benefit from all sub jobs being started
-                        * concurrently.  Postpone scheduling until after group has been unlocked. */
-                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
-                       schedule_on_group = MALI_FALSE;
-               } else {
-                       /* Schedule job immediately since we are not GP bound. */
-                       schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
-                       schedule_on_group = MALI_TRUE;
-               }
-       }
-
-       if (schedule_on_group) {
-               /* Schedule a new job on this group. */
-               mali_pp_scheduler_schedule_on_group_and_unlock(group);
-       } else {
-               /* Set group idle.  Will rejoin virtual group, under appropriate conditions. */
-               mali_pp_scheduler_set_group_idle_and_unlock(group);
-       }
-
-       if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
-               if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
-                       /* Schedule PP directly. */
-                       mali_pp_scheduler_schedule();
-                       schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
-               }
-
-               /* Schedule other jobs that were activated. */
-               mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
-       }
-
-       if (job_is_done) {
-               /* Return job to user and delete it. */
-               mali_pp_scheduler_finalize_job(job);
-       }
-}
-
-void mali_pp_scheduler_suspend(void)
-{
-       mali_pp_scheduler_lock();
-       pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
-       mali_pp_scheduler_unlock();
-
-       /* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
-        * mali_pp_scheduler_is_suspended() function will be called. It returns true
-        * only if no group is working and pause_count > 0, so while any core is
-        * still active the wait will not complete.
-        */
-       _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
-}
-
-void mali_pp_scheduler_resume(void)
-{
-       mali_pp_scheduler_lock();
-       pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
-       mali_pp_scheduler_unlock();
-       if (0 == pause_count) {
-               mali_pp_scheduler_schedule();
-       }
-}
-
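-/**
- * Submit a job from user space.
- *
- * The job is added to the session's frame builder lookup list, a PM reference
- * is taken for it, and it is registered with the Timeline system, which will
- * queue it for scheduling once all its dependencies have been resolved.
- *
- * @return Timeline point for the job.
- */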
-mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
-{
-       mali_timeline_point point;
-       u32 fb_lookup_id = 0;
-
-       MALI_DEBUG_ASSERT_POINTER(session);
-       MALI_DEBUG_ASSERT_POINTER(job);
-
-       mali_pp_scheduler_lock();
-
-       fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
-       MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
-
-       /* Add the job to the lookup list used to quickly find queued jobs when disabling their write-back units. */
-       _mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);
-
-       mali_pp_scheduler_unlock();
-
-       mali_pp_scheduler_job_queued();
-
-       /* Add job to Timeline system. */
-       point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);
-
-       return point;
-}
-
-_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
-{
-       struct mali_session_data *session;
-       struct mali_pp_job *job;
-       mali_timeline_point point;
-       u32 __user *timeline_point_ptr = NULL;
-
-       MALI_DEBUG_ASSERT_POINTER(uargs);
-       MALI_DEBUG_ASSERT_POINTER(ctx);
-
-       session = (struct mali_session_data*)ctx;
-
-       job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
-       if (NULL == job) {
-               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       timeline_point_ptr = (u32 __user *) job->uargs.timeline_point_ptr;
-
-       point = mali_pp_scheduler_submit_job(session, job);
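-       /* Ownership of the job was transferred to the scheduler above; clear the
-        * local pointer so it cannot be touched again. */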
-       job = NULL;
-
-       if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
-               /* Let user space know that something failed after the job was started. */
-               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
-       }
-
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
-{
-       struct mali_session_data *session;
-       _mali_uk_pp_and_gp_start_job_s kargs;
-       struct mali_pp_job *pp_job;
-       struct mali_gp_job *gp_job;
-       u32 __user *timeline_point_ptr = NULL;
-       mali_timeline_point point;
-
-       MALI_DEBUG_ASSERT_POINTER(ctx);
-       MALI_DEBUG_ASSERT_POINTER(uargs);
-
-       session = (struct mali_session_data *) ctx;
-
-       if (0 != _mali_osk_copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       pp_job = mali_pp_job_create(session, kargs.pp_args, mali_scheduler_get_new_id());
-       if (NULL == pp_job) {
-               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       gp_job = mali_gp_job_create(session, kargs.gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
-       if (NULL == gp_job) {
-               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
-               mali_pp_job_delete(pp_job);
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       timeline_point_ptr = (u32 __user *) pp_job->uargs.timeline_point_ptr;
-
-       /* Submit GP job. */
-       mali_gp_scheduler_submit_job(session, gp_job);
-       gp_job = NULL;
-
-       /* Submit PP job. */
-       point = mali_pp_scheduler_submit_job(session, pp_job);
-       pp_job = NULL;
-
-       if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
-               /* Let user space know that something failed after the jobs were started. */
-               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
-       }
-
-       return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
-{
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
-       args->number_of_total_cores = num_cores;
-       args->number_of_enabled_cores = enabled_cores;
-       return _MALI_OSK_ERR_OK;
-}
-
-u32 mali_pp_scheduler_get_num_cores_total(void)
-{
-       return num_cores;
-}
-
-u32 mali_pp_scheduler_get_num_cores_enabled(void)
-{
-       return enabled_cores;
-}
-
-_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
-{
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
-       args->version = pp_version;
-       return _MALI_OSK_ERR_OK;
-}
-
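-/**
- * Disable write-back units on queued jobs for a given frame builder.
- *
- * Every queued job whose frame builder id matches @ref args->fb_id has the
- * write-back units whose source addresses match disabled, so that surfaces
- * user space has discarded are never written to memory.
- */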
-void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
-{
-       struct mali_session_data *session;
-       struct mali_pp_job *job;
-       struct mali_pp_job *tmp;
-       u32 fb_lookup_id;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_DEBUG_ASSERT_POINTER(args->ctx);
-
-       session = (struct mali_session_data*)args->ctx;
-
-       fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
-
-       mali_pp_scheduler_lock();
-
-       /* Iterate over all jobs for the given frame builder id. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
-               MALI_DEBUG_CODE(u32 disable_mask = 0);
-
-               if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
-                       MALI_DEBUG_CODE(disable_mask |= 0xD<<(4*3));
-                       if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
-                               MALI_DEBUG_CODE(disable_mask |= 0x1<<(4*1));
-                               mali_pp_job_disable_wb0(job);
-                       }
-                       if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
-                               MALI_DEBUG_CODE(disable_mask |= 0x2<<(4*2));
-                               mali_pp_job_disable_wb1(job);
-                       }
-                       if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR/sizeof(u32)]) {
-                               MALI_DEBUG_CODE(disable_mask |= 0x3<<(4*3));
-                               mali_pp_job_disable_wb2(job);
-                       }
-                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
-               } else {
-                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
-               }
-       }
-
-       mali_pp_scheduler_unlock();
-}
-
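-/**
- * Abort all PP jobs from a session that is shutting down.
- *
- * Unstarted jobs are removed from the queues and marked as failed; jobs that
- * are already complete are released and deleted, while jobs with running sub
- * jobs are aborted on the groups executing them.
- */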
-void mali_pp_scheduler_abort_session(struct mali_session_data *session)
-{
-       u32 i = 0;
-       struct mali_pp_job *job, *tmp_job;
-       struct mali_group *group, *tmp_group;
-       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
-       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
-
-       MALI_DEBUG_ASSERT_POINTER(session);
-       MALI_DEBUG_ASSERT(session->is_aborting);
-
-       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));
-
-       mali_pp_scheduler_lock();
-
-       /* Find all jobs from the aborting session. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
-               /* Remove job from queue. */
-               if (mali_pp_job_is_virtual(job)) {
-                       MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
-                       if (0 == mali_pp_job_get_first_unstarted_sub_job(job)) {
-                               --virtual_job_queue.depth;
-                       }
-               } else {
-                       job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
-               }
-
-               _mali_osk_list_delinit(&job->list);
-               _mali_osk_list_delinit(&job->session_fb_lookup_list);
-
-               mali_pp_job_mark_unstarted_failed(job);
-
-               if (mali_pp_job_is_complete(job)) {
-                       /* Job is complete, remove from session list. */
-                       _mali_osk_list_delinit(&job->session_list);
-
-                       /* Move job to local list for release and deletion. */
-                       _mali_osk_list_add(&job->list, &removed_jobs);
-
-                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
-               } else {
-                       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
-               }
-       }
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
-               groups[i++] = group;
-       }
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
-               groups[i++] = group;
-       }
-
-       mali_pp_scheduler_unlock();
-
-       /* Release and delete all found jobs from the aborting session. */
-       _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
-               mali_timeline_tracker_release(&job->tracker);
-               mali_pp_job_delete(job);
-               mali_pp_scheduler_job_completed();
-       }
-
-       /* Abort any running jobs from the session. */
-       while (i > 0) {
-               mali_group_abort_session(groups[--i], session);
-       }
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_group_abort_session(virtual_group, session);
-       }
-}
-
-static mali_bool mali_pp_scheduler_is_suspended(void *data)
-{
-       mali_bool ret;
-
-       /* This callback does not use the data pointer. */
-       MALI_IGNORE(data);
-
-       mali_pp_scheduler_lock();
-
-       ret = pause_count > 0
-             && _mali_osk_list_empty(&group_list_working)
-             && VIRTUAL_GROUP_WORKING != virtual_group_state;
-
-       mali_pp_scheduler_unlock();
-
-       return ret;
-}
-
-struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
-{
-       if (mali_pp_scheduler_has_virtual_group()) {
-               return mali_group_get_pp_core(virtual_group);
-       } else {
-               return NULL;
-       }
-}
-
-#if MALI_STATE_TRACKING
-u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
-{
-       int n = 0;
-       struct mali_group *group;
-       struct mali_group *temp;
-
-       n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
-       n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
-       n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
-       n += _mali_osk_snprintf(buf + n, size - n, "\n");
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
-               n += mali_group_dump_state(group, buf + n, size - n);
-       }
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
-               n += mali_group_dump_state(group, buf + n, size - n);
-       }
-
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
-               n += mali_group_dump_state(group, buf + n, size - n);
-       }
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               n += mali_group_dump_state(virtual_group, buf + n, size - n);
-       }
-
-       n += _mali_osk_snprintf(buf + n, size - n, "\n");
-       return n;
-}
-#endif
-
-/* This function is intended for power-on reset of all cores.
- * It is only safe to call when the scheduler is paused and all cores are
- * idle, which is always the case on init and power on. */
-void mali_pp_scheduler_reset_all_groups(void)
-{
-       struct mali_group *group, *temp;
-       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
-       s32 i = 0;
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_group_lock(virtual_group);
-               mali_group_reset(virtual_group);
-               mali_group_unlock(virtual_group);
-       }
-
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
-       MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
-       mali_pp_scheduler_lock();
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
-               groups[i++] = group;
-       }
-       mali_pp_scheduler_unlock();
-
-       while (i > 0) {
-               group = groups[--i];
-
-               mali_group_lock(group);
-               mali_group_reset(group);
-               mali_group_unlock(group);
-       }
-}
-
-void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
-{
-       struct mali_group *group, *temp;
-       struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
-       s32 i = 0;
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_group_zap_session(virtual_group, session);
-       }
-
-       mali_pp_scheduler_lock();
-       _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
-               groups[i++] = group;
-       }
-       mali_pp_scheduler_unlock();
-
-       while (i > 0) {
-               mali_group_zap_session(groups[--i], session);
-       }
-}
-
-/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
- * before calling this function to avoid Mali powering down as HW is accessed.
- */
-static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       mali_group_lock(group);
-
-       if (MALI_GROUP_STATE_DISABLED != group->state) {
-               mali_group_unlock(group);
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
-               return;
-       }
-
-       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));
-
-       mali_pp_scheduler_lock();
-
-       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
-       ++enabled_cores;
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_bool update_hw;
-
-               /* Add group to virtual group. */
-               _mali_osk_list_delinit(&(group->pp_scheduler_list));
-               group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
-
-               mali_pp_scheduler_unlock();
-               mali_group_unlock(group);
-
-               mali_group_lock(virtual_group);
-
-               update_hw = mali_pm_is_power_on();
-               /* Get ref of group domain */
-               mali_group_get_pm_domain_ref(group);
-
-               MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
-                                 MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
-
-               if (update_hw) {
-                       mali_group_lock(group);
-                       mali_group_power_on_group(group);
-                       mali_group_reset(group);
-                       mali_group_unlock(group);
-               }
-
-               mali_pp_scheduler_enable_empty_virtual();
-               mali_group_add_group(virtual_group, group, update_hw);
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));
-
-               mali_group_unlock(virtual_group);
-       } else {
-               /* Get ref of group domain */
-               mali_group_get_pm_domain_ref(group);
-
-               MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
-                                 MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
-
-               /* Put group on idle list. */
-               if (mali_pm_is_power_on()) {
-                       mali_group_power_on_group(group);
-                       mali_group_reset(group);
-               }
-
-               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
-               group->state = MALI_GROUP_STATE_IDLE;
-
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
-               mali_pp_scheduler_unlock();
-               mali_group_unlock(group);
-       }
-}
-
-void mali_pp_scheduler_enable_group(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-
-       mali_pp_scheduler_enable_group_internal(group);
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-
-       /* Pick up any jobs that might have been queued if all PP groups were disabled. */
-       mali_pp_scheduler_schedule();
-}
-
-static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
-{
-       if (mali_pp_scheduler_has_virtual_group()) {
-               mali_group_lock(virtual_group);
-
-               MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
-               if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
-                       /* The group was in the process of being added to the virtual group.  We
-                        * only need to change the state to reverse this. */
-                       group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
-               } else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
-                       /* Remove group from virtual group.  The state of the group will be
-                        * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
-                       mali_group_remove_group(virtual_group, group);
-
-                       mali_pp_scheduler_disable_empty_virtual();
-               }
-
-               mali_group_unlock(virtual_group);
-       }
-
-       mali_group_lock(group);
-       mali_pp_scheduler_lock();
-
-       MALI_DEBUG_ASSERT(   MALI_GROUP_STATE_IDLE            == group->state
-                            || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
-                            || MALI_GROUP_STATE_DISABLED        == group->state);
-
-       if (MALI_GROUP_STATE_DISABLED == group->state) {
-               MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
-       } else {
-               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));
-
-               --enabled_cores;
-               _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
-               group->state = MALI_GROUP_STATE_DISABLED;
-
-               mali_group_power_off_group(group, MALI_TRUE);
-               mali_group_put_pm_domain_ref(group);
-       }
-
-       mali_pp_scheduler_unlock();
-       mali_group_unlock(group);
-}
-
-void mali_pp_scheduler_disable_group(struct mali_group *group)
-{
-       MALI_DEBUG_ASSERT_POINTER(group);
-
-       mali_pp_scheduler_suspend();
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-
-       mali_pp_scheduler_disable_group_internal(group);
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-
-       mali_pp_scheduler_resume();
-}
-
-static void mali_pp_scheduler_notify_core_change(u32 num_cores)
-{
-       mali_bool done = MALI_FALSE;
-
-       if (mali_is_mali450()) {
-               return;
-       }
-
-       /*
-        * This function gets a bit complicated because we can't hold the session lock while
-        * allocating notification objects.
-        */
-
-       while (!done) {
-               u32 i;
-               u32 num_sessions_alloc;
-               u32 num_sessions_with_lock;
-               u32 used_notification_objects = 0;
-               _mali_osk_notification_t **notobjs;
-
-               /* Pre-allocate the number of notification objects we need right now (the count might change after the lock has been taken) */
-               num_sessions_alloc = mali_session_get_count();
-               if (0 == num_sessions_alloc) {
-                       /* No sessions to report to */
-                       return;
-               }
-
-               notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
-               if (NULL == notobjs) {
-                       MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
-                       /* There is probably no point in trying again; the system must be critically low on memory and is probably unusable anyway. */
-                       return;
-               }
-
-               for (i = 0; i < num_sessions_alloc; i++) {
-                       notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
-                       if (NULL != notobjs[i]) {
-                               _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
-                               data->number_of_enabled_cores = num_cores;
-                       } else {
-                               MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
-                       }
-               }
-
-               mali_session_lock();
-
-               /* number of sessions will not change while we hold the lock */
-               num_sessions_with_lock = mali_session_get_count();
-
-               if (num_sessions_alloc >= num_sessions_with_lock) {
-                       /* We allocated enough notification objects for all sessions that currently exist. */
-                       struct mali_session_data *session, *tmp;
-                       MALI_SESSION_FOREACH(session, tmp, link) {
-                               MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
-                               if (NULL != notobjs[used_notification_objects]) {
-                                       mali_session_send_notification(session, notobjs[used_notification_objects]);
-                                       notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
-                               }
-                               used_notification_objects++;
-                       }
-                       done = MALI_TRUE;
-               }
-
-               mali_session_unlock();
-
-               /* Delete any remaining/unused notification objects */
-               for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
-                       if (NULL != notobjs[used_notification_objects]) {
-                               _mali_osk_notification_delete(notobjs[used_notification_objects]);
-                       }
-               }
-
-               _mali_osk_free(notobjs);
-       }
-}
-
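-/**
- * Enable disabled groups until @ref target_core_nr cores are enabled, then
- * kick the scheduler so that jobs queued while cores were disabled get picked
- * up.
- */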
-static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
-{
-       MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-       _mali_osk_pm_dev_barrier();
-
-       while (target_core_nr > enabled_cores) {
-               /*
-                * If there are any cores which do not belong to any domain,
-                * then these will always be found at the head of the list and
-                * we will thus enable them first.
-                */
-
-               mali_pp_scheduler_lock();
-
-               if (!_mali_osk_list_empty(&group_list_disabled)) {
-                       struct mali_group *group;
-
-                       group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);
-
-                       MALI_DEBUG_ASSERT_POINTER(group);
-                       MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
-
-                       mali_pp_scheduler_unlock();
-
-                       mali_pp_scheduler_enable_group_internal(group);
-               } else {
-                       mali_pp_scheduler_unlock();
-                       break; /* no more groups on disabled list */
-               }
-       }
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-
-       mali_pp_scheduler_schedule();
-}
-
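-/**
- * Disable groups until only @ref target_core_nr cores remain enabled.
- *
- * The scheduler is suspended while scaling down.  Groups inside PMU power
- * domains are disabled first (so that they can actually be powered off), then
- * idle physical groups, and finally groups are pulled out of the virtual
- * group.
- */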
-static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
-{
-       MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
-
-       mali_pp_scheduler_suspend();
-
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
-
-       _mali_osk_pm_dev_ref_add_no_power_on();
-
-       if (NULL != mali_pmu_get_global_pmu_core()) {
-               int i;
-
-               for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
-                       if (target_core_nr < enabled_cores) {
-                               struct mali_pm_domain *domain;
-
-                               domain = mali_pm_domain_get_from_index(i);
-
-                               /* Domain is valid and has pp cores */
-                               if ((NULL != domain) && (NULL != domain->group_list)) {
-                                       struct mali_group *group;
-
-                                       MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
-                                               /* If group is pp core */
-                                               if (NULL != mali_group_get_pp_core(group)) {
-                                                       mali_pp_scheduler_disable_group_internal(group);
-                                                       if (target_core_nr >= enabled_cores) {
-                                                               break;
-                                                       }
-                                               }
-                                       }
-                               }
-                       } else {
-                               break;
-                       }
-               }
-       }
-
-       /*
-        * Didn't find enough cores associated with a power domain,
-        * so we need to disable cores which we can't power off with the PMU.
-        * Start with physical groups used by the scheduler,
-        * then remove physical from virtual if even more groups are needed.
-        */
-
-       while (target_core_nr < enabled_cores) {
-               mali_pp_scheduler_lock();
-               if (!_mali_osk_list_empty(&group_list_idle)) {
-                       struct mali_group *group;
-
-                       group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
-                       MALI_DEBUG_ASSERT_POINTER(group);
-
-                       mali_pp_scheduler_unlock();
-
-                       mali_pp_scheduler_disable_group_internal(group);
-               } else {
-                       mali_pp_scheduler_unlock();
-                       break; /* No more physical groups */
-               }
-       }
-
-       if (mali_pp_scheduler_has_virtual_group()) {
-               while (target_core_nr < enabled_cores) {
-                       mali_group_lock(virtual_group);
-                       if (!_mali_osk_list_empty(&virtual_group->group_list)) {
-                               struct mali_group *group;
-
-                               group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
-                               MALI_DEBUG_ASSERT_POINTER(group);
-
-                               mali_group_unlock(virtual_group);
-
-                               mali_pp_scheduler_disable_group_internal(group);
-                       } else {
-                               mali_group_unlock(virtual_group);
-                               break; /* No more physical groups in virtual group */
-                       }
-               }
-       }
-
-       _mali_osk_pm_dev_ref_dec_no_power_on();
-
-       mali_pp_scheduler_resume();
-}
-
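-/**
- * Set the number of enabled PP cores.
- *
- * @return 0 on success (including no change), -EPERM if core scaling is
- * disabled and @ref override is not set, or -EINVAL if @ref target_core_nr is
- * zero or larger than the total number of cores.
- */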
-int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
-{
-       if (target_core_nr == enabled_cores) return 0;
-       if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
-       if (target_core_nr > num_cores) return -EINVAL;
-       if (0 == target_core_nr) return -EINVAL;
-
-       if (target_core_nr > enabled_cores) {
-               mali_pp_scheduler_core_scale_up(target_core_nr);
-       } else if (target_core_nr < enabled_cores) {
-               mali_pp_scheduler_core_scale_down(target_core_nr);
-       }
-
-       if (target_core_nr != enabled_cores) {
-               MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
-       }
-
-       mali_pp_scheduler_notify_core_change(enabled_cores);
-
-       return 0;
-}
-
-void mali_pp_scheduler_core_scaling_enable(void)
-{
-       /* Note: core scaling is enabled by default. */
-       core_scaling_enabled = MALI_TRUE;
-}
-
-void mali_pp_scheduler_core_scaling_disable(void)
-{
-       core_scaling_enabled = MALI_FALSE;
-}
-
-mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
-{
-       return core_scaling_enabled;
-}
-
-static void mali_pp_scheduler_job_queued(void)
-{
-       /* We hold a PM reference for every job that is queued or running. */
-       _mali_osk_pm_dev_ref_add();
-
-       if (mali_utilization_enabled()) {
-               /*
-                * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
-                * This is fine because we only lose the tiny idle gap between jobs, and in
-                * return there is less utilization bookkeeping to do (fewer locks taken).
-                */
-               mali_utilization_pp_start();
-       }
-}
-
-static void mali_pp_scheduler_job_completed(void)
-{
-       /* Release the PM reference we got in the mali_pp_scheduler_job_queued() function */
-       _mali_osk_pm_dev_ref_dec();
-
-       if (mali_utilization_enabled()) {
-               mali_utilization_pp_end();
-       }
-}
-
-static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
-{
-       MALI_DEBUG_ASSERT_POINTER(job);
-       MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
-       /* This job should not be on any lists. */
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
-       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
-
-       _mali_osk_list_delinit(&job->session_fb_lookup_list);
-
-       mali_pp_scheduler_unlock();
-
-       /* Release tracker. */
-       mali_timeline_tracker_release(&job->tracker);
-}
-
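-/**
- * Put an activated job on the correct scheduler queue.
- *
- * The job goes on the normal or high priority queue of either the physical or
- * the virtual scheduler, at the position given by
- * mali_pp_job_should_start_after().  If the session is aborting, the job is
- * aborted and deleted instead of being queued.
- *
- * @return Scheduling bitmask indicating whether PP scheduling is now needed.
- */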
-static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
-{
-       _mali_osk_list_t *queue = NULL;
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-       struct mali_pp_job *iter, *tmp;
-
-       MALI_DEBUG_ASSERT_POINTER(job);
-       MALI_DEBUG_ASSERT_POINTER(job->session);
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-       if (mali_pp_job_needs_dma_buf_mapping(job)) {
-               mali_dma_buf_map_job(job);
-       }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-       mali_pp_scheduler_lock();
-
-       if (unlikely(job->session->is_aborting)) {
-               /* The scheduler must be locked before the session's aborting flag is checked. */
-               MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
-               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));
-
-               mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
-
-               /* Delete job. */
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-               mali_pp_scheduler_deferred_job_delete(job);
-#else
-               mali_pp_job_delete(job);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-               mali_pp_scheduler_job_completed();
-
-               /* Since we are aborting we ignore the scheduler mask. */
-               return MALI_SCHEDULER_MASK_EMPTY;
-       }
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-       trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
-#endif
-
-       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
-
-       job->cache_order = mali_scheduler_get_new_cache_order();
-
-       /* Determine which queue the job should be added to. */
-       if (mali_pp_job_is_virtual(job)) {
-               if (job->session->use_high_priority_job_queue) {
-                       queue = &virtual_job_queue.high_pri;
-               } else {
-                       queue = &virtual_job_queue.normal_pri;
-               }
-
-               virtual_job_queue.depth += 1;
-
-               /* Set schedule bitmask if the virtual group is idle. */
-               if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
-                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
-               }
-       } else {
-               if (job->session->use_high_priority_job_queue) {
-                       queue = &job_queue.high_pri;
-               } else {
-                       queue = &job_queue.normal_pri;
-               }
-
-               job_queue.depth += mali_pp_job_get_sub_job_count(job);
-
-               /* Set schedule bitmask if there are physical PP cores available, or if there is an
-                * idle virtual group. */
-               if (!_mali_osk_list_empty(&group_list_idle)
-                   || (mali_pp_scheduler_has_virtual_group()
-                       && (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
-                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
-               }
-       }
-
-       /* Find position in queue where job should be added. */
-       _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
-               if (mali_pp_job_should_start_after(job, iter)) {
-                       break;
-               }
-       }
-
-       /* Add job to queue. */
-       _mali_osk_list_add(&job->list, &iter->list);
-
-       /* Add job to session list. */
-       _mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));
-
-       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
-                            mali_pp_job_is_virtual(job) ? "Virtual" : "Physical",
-                            mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
-
-       mali_pp_scheduler_unlock();
-
-       return schedule_mask;
-}
-
-mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
-{
-       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
-       MALI_DEBUG_ASSERT_POINTER(job);
-       MALI_DEBUG_ASSERT_POINTER(job->session);
-
-       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
-
-       if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
-               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
-
-               mali_pp_scheduler_lock();
-               mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
-
-               mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
-               mali_pp_scheduler_finalize_job(job);
-
-               return MALI_SCHEDULER_MASK_EMPTY;
-       }
-
-       /* PP job is ready to run, queue it. */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-       if (mali_pp_job_needs_dma_buf_mapping(job)) {
-               mali_pp_scheduler_deferred_job_queue(job);
-
-               return MALI_SCHEDULER_MASK_EMPTY;
-       }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-       schedule_mask = mali_pp_scheduler_queue_job(job);
-
-       return schedule_mask;
-}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_pp_scheduler.h
deleted file mode 100644
index 6167d8c..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
- */
-
-#ifndef __MALI_PP_SCHEDULER_H__
-#define __MALI_PP_SCHEDULER_H__
-
-#include "mali_osk.h"
-#include "mali_pp_job.h"
-#include "mali_group.h"
-#include "linux/mali/mali_utgard.h"
-
-/** Initialize the HW-independent parts of the PP scheduler
- */
-_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
-void mali_pp_scheduler_terminate(void);
-
-/** Populate the PP scheduler with groups
- */
-void mali_pp_scheduler_populate(void);
-void mali_pp_scheduler_depopulate(void);
-
-/**
- * @brief Handle job completion.
- *
- * Will attempt to start a new job on the locked group.
- *
- * If all sub jobs have completed, the job's tracker will be released and any other resources
- * associated with the job will be freed. A notification will also be sent to user space.
- *
- * Releasing the tracker might activate other jobs, so if appropriate we also schedule them.
- *
- * @note Group must be locked when entering this function.  Will be unlocked before exiting.
- *
- * @param group The group that completed the job.
- * @param job The job that is done.
- * @param sub_job Sub job of job.
- * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
- * @param in_upper_half MALI_TRUE if called from upper half, MALI_FALSE if not.
- */
-void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half);
-
-void mali_pp_scheduler_suspend(void);
-void mali_pp_scheduler_resume(void);
-
-/**
- * @brief Abort all running and queued PP jobs from session.
- *
- * This function aborts all PP jobs from the specified session. Queued jobs are removed from the
- * queue and jobs currently running on a core will be aborted.
- *
- * @param session Session that is aborting.
- */
-void mali_pp_scheduler_abort_session(struct mali_session_data *session);
-
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by the PP scheduler. This must be
- * called after the Mali HW has been powered on in order to reset the HW.
- *
- * This function is intended for power on reset of all cores.
- * No locking is done, which can only be safe if the scheduler is paused and
- * all cores idle. That is always the case on init and power on.
- */
-void mali_pp_scheduler_reset_all_groups(void);
-
-/**
- * @brief Zap TLB on all groups with \a session active
- *
- * The scheduler will zap the session on all groups it owns.
- */
-void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
-
-/**
- * @brief Get the virtual PP core
- *
- * The returned PP core may only be used to prepare DMA command buffers for the
- * PP core. Other actions must go through the PP scheduler, or the virtual
- * group.
- *
- * @return Pointer to the virtual PP core, or NULL if it doesn't exist
- */
-struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void);
-
-u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
-
-void mali_pp_scheduler_enable_group(struct mali_group *group);
-void mali_pp_scheduler_disable_group(struct mali_group *group);
-
-/**
- * @brief Used by the Timeline system to queue a PP job.
- *
- * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
- *
- * @param job The PP job that is being activated.
- *
- * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
- * call.
- */
-mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job);
-
-/**
- * @brief Schedule queued jobs on idle cores.
- */
-void mali_pp_scheduler_schedule(void);
-
-int mali_pp_scheduler_set_perf_level(u32 cores, mali_bool override);
-
-void mali_pp_scheduler_core_scaling_enable(void);
-void mali_pp_scheduler_core_scaling_disable(void);
-mali_bool mali_pp_scheduler_core_scaling_is_enabled(void);
-
-u32 mali_pp_scheduler_get_num_cores_total(void);
-u32 mali_pp_scheduler_get_num_cores_enabled(void);
-
-/**
- * @brief Returns the number of Pixel Processors in the system irrespective of the context
- *
- * @return number of physical Pixel Processor cores in the system
- */
-u32 mali_pp_scheduler_get_num_cores_total(void);
-
-#endif /* __MALI_PP_SCHEDULER_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_scheduler.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/common/mali_scheduler.c
index 2daa2e46946b0cbacd786e3983443314bd1dd748..87ba070c80c5b8ab219fdccab8473ac5d5777a56 100644
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  */
 
 #include "mali_scheduler.h"
-
 #include "mali_kernel_common.h"
 #include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
 
-mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
 
-static _mali_osk_atomic_t mali_job_id_autonumber;
-static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+/*
+ * ---------- static defines/constants ----------
+ */
 
-static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
-static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job
+ * queueing when in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
 
-static void mali_scheduler_wq_schedule_pp(void *arg)
-{
-       MALI_IGNORE(arg);
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
 
-       mali_pp_scheduler_schedule();
-}
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
 
-static void mali_scheduler_wq_schedule_gp(void *arg)
-{
-       MALI_IGNORE(arg);
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
 
-       mali_gp_scheduler_schedule();
-}
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
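For reference while reading this hunk: struct mali_scheduler_job_queue is declared just above but defined in mali_scheduler.h, which this commit does not show. Judging only from the fields this file touches (normal_pri, high_pri, depth), it likely pairs two intrusive lists with a combined depth counter, roughly:

    struct mali_scheduler_job_queue {
            _mali_osk_list_t normal_pri; /* queued jobs with normal priority */
            _mali_osk_list_t high_pri;   /* queued jobs with high priority */
            u32 depth;                   /* (sub) jobs across both lists */
    };

This is a hedged reconstruction as a reading aid; the real definition may carry extra fields.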
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+/*
+ * ---------- static variables ----------
+ */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+#endif
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+       struct mali_session_data *session, struct mali_gp_job *job);
+static mali_timeline_point mali_scheduler_submit_pp_job(
+       struct mali_session_data *session, struct mali_pp_job *job);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+               mali_bool success);
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+               u32 num_cores_in_virtual);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_delete(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
 
 _mali_osk_errcode_t mali_scheduler_initialize(void)
 {
-       if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
-               MALI_DEBUG_PRINT(1,  ("Initialization of atomic job id counter failed.\n"));
+       _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+       _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+       job_queue_gp.depth = 0;
+
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+       _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+       job_queue_pp.depth = 0;
+
+       mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+                                         _MALI_OSK_LOCKFLAG_ORDERED,
+                                         _MALI_OSK_LOCK_ORDER_SCHEDULER);
+       if (NULL == mali_scheduler_lock_obj) {
+               mali_scheduler_terminate();
+       }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+       scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+                                            mali_scheduler_do_pp_job_delete, NULL);
+       if (NULL == scheduler_wq_pp_job_delete) {
+               mali_scheduler_terminate();
                return _MALI_OSK_ERR_FAULT;
        }
 
-       if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
-               MALI_DEBUG_PRINT(1,  ("Initialization of atomic job cache order counter failed.\n"));
-               _mali_osk_atomic_term(&mali_job_id_autonumber);
+       scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+                                              _MALI_OSK_LOCKFLAG_ORDERED,
+                                              _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == scheduler_pp_job_delete_lock) {
+               mali_scheduler_terminate();
                return _MALI_OSK_ERR_FAULT;
        }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
 
-       pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
-       if (NULL == pp_scheduler_wq_high_pri) {
-               _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
-               _mali_osk_atomic_term(&mali_job_id_autonumber);
-               return _MALI_OSK_ERR_NOMEM;
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+                                           mali_scheduler_do_pp_job_queue, NULL);
+       if (NULL == scheduler_wq_pp_job_queue) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
        }
 
-       gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
-       if (NULL == gp_scheduler_wq_high_pri) {
-               _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
-               _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
-               _mali_osk_atomic_term(&mali_job_id_autonumber);
-               return _MALI_OSK_ERR_NOMEM;
+       scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+                                             _MALI_OSK_LOCKFLAG_ORDERED,
+                                             _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+       if (NULL == scheduler_pp_job_queue_lock) {
+               mali_scheduler_terminate();
+               return _MALI_OSK_ERR_FAULT;
        }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
 
        return _MALI_OSK_ERR_OK;
 }
 
 void mali_scheduler_terminate(void)
 {
-       _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
-       _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       if (NULL != scheduler_pp_job_queue_lock) {
+               _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+               scheduler_pp_job_queue_lock = NULL;
+       }
+
+       if (NULL != scheduler_wq_pp_job_queue) {
+               _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+               scheduler_wq_pp_job_queue = NULL;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+       if (NULL != scheduler_pp_job_delete_lock) {
+               _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+               scheduler_pp_job_delete_lock = NULL;
+       }
+
+       if (NULL != scheduler_wq_pp_job_delete) {
+               _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+               scheduler_wq_pp_job_delete = NULL;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+       if (NULL != mali_scheduler_lock_obj) {
+               _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+               mali_scheduler_lock_obj = NULL;
+       }
+
        _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
        _mali_osk_atomic_term(&mali_job_id_autonumber);
 }
 
-u32 mali_scheduler_get_new_id(void)
+u32 mali_scheduler_job_physical_head_count(void)
 {
-       u32 job_id = _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
-       return job_id;
+       /*
+        * Count how many physical sub jobs are present from the head of queue
+        * until the first virtual job is present.
+        * Early out when we have reached maximum number of PP cores (8)
+        */
+       u32 count = 0;
+       struct mali_pp_job *job;
+       struct mali_pp_job *temp;
+
+       /* Check for partially started normal pri jobs */
+       if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                          struct mali_pp_job, list);
+
+               MALI_DEBUG_ASSERT_POINTER(job);
+
+               if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+                       /*
+                        * Remember: virtual jobs can't be queued and started
+                        * at the same time, so this must be a physical job
+                        */
+                       count += mali_pp_job_unstarted_sub_job_count(job);
+                       if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+                               return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                       }
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+                                   struct mali_pp_job, list) {
+               if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+                       count += mali_pp_job_unstarted_sub_job_count(job);
+                       if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+                               return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                       }
+               } else {
+                       /* Came across a virtual job, so stop counting */
+                       return count;
+               }
+       }
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+                                   struct mali_pp_job, list) {
+               if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+                       /* Any partially started job is already counted */
+                       if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+                               count += mali_pp_job_unstarted_sub_job_count(job);
+                               if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <=
+                                   count) {
+                                       return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+                               }
+                       }
+               } else {
+                       /* Came across a virtual job, so stop counting */
+                       return count;
+               }
+       }
+
+       return count;
+}
+
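The head count above walks the PP queues from the front and stops either at the first virtual job or as soon as enough physical sub jobs have been seen to fill every core. A minimal standalone sketch of that bounded-scan idea, with a hypothetical singly linked job type standing in for the driver's _mali_osk_list_t queues:

    #include <stddef.h>

    #define MAX_CORES 8 /* mirrors MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS */

    struct job {
            int is_virtual;          /* stand-in for mali_pp_job_is_virtual() */
            unsigned unstarted_subs; /* stand-in for mali_pp_job_unstarted_sub_job_count() */
            struct job *next;        /* head of list = next job to run */
    };

    /* Count unstarted physical sub jobs from the head of the queue,
     * stopping at the first virtual job or once all cores can be filled. */
    static unsigned physical_head_count(const struct job *head)
    {
            unsigned count = 0;

            for (; head != NULL; head = head->next) {
                    if (head->is_virtual)
                            break;            /* virtual job: stop counting */
                    count += head->unstarted_subs;
                    if (count >= MAX_CORES)
                            return MAX_CORES; /* early out: all cores can be kept busy */
            }
            return count;
    }

The early out keeps the scan bounded by the core count instead of the queue depth; the caller only needs to know whether enough physical work is queued, not the exact total.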
+mali_bool mali_scheduler_job_next_is_virtual(void)
+{
+       struct mali_pp_job *job;
+
+       job = mali_scheduler_job_pp_virtual_peek();
+       if (NULL != job) {
+               MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+               return MALI_TRUE;
+       }
+
+       return MALI_FALSE;
 }
 
-u32 mali_scheduler_get_new_cache_order(void)
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
 {
-       u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
-       return job_cache_order;
+       _mali_osk_list_t *queue;
+       struct mali_gp_job *job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+       MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+
+       if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+               queue = &job_queue_gp.high_pri;
+       } else {
+               queue = &job_queue_gp.normal_pri;
+               MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+       }
+
+       job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       mali_gp_job_list_remove(job);
+       job_queue_gp.depth--;
+
+       return job;
 }
 
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
 {
-       if (MALI_SCHEDULER_MASK_GP & mask) {
-               /* GP needs scheduling. */
-               if (deferred_schedule) {
-                       /* Schedule GP deferred. */
-                       _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
-               } else {
-                       /* Schedule GP now. */
-                       mali_gp_scheduler_schedule();
+       struct mali_pp_job *job = NULL;
+       struct mali_pp_job *tmp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       /*
+        * For PP jobs we favour partially started jobs in normal
+        * priority queue over unstarted jobs in high priority queue
+        */
+
+       if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                              struct mali_pp_job, list);
+               MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+               if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+                       job = tmp_job;
+               }
+       }
+
+       if (NULL == job ||
+           MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+               /*
+                * There isn't a partially started job in normal queue, so
+                * look in high priority queue.
+                */
+               if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+                       MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+                       tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+                                                      struct mali_pp_job, list);
+                       MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+                       if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+                               job = tmp_job;
+                       }
+               }
+       }
+
+       return job;
+}
+
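As the comment above says, a partially started job at the head of the normal-priority queue is preferred over an unstarted high-priority job. A hedged sketch of just that selection rule, over two hypothetical queue heads (NULL meaning an empty queue):

    #include <stddef.h>

    struct pp_job {
            int is_virtual;       /* stand-in for mali_pp_job_is_virtual() */
            int has_started_subs; /* stand-in for mali_pp_job_has_started_sub_jobs() */
    };

    /* Pick the next physical job: a partially started normal-priority job
     * wins over the high-priority head; otherwise high priority wins. */
    static struct pp_job *pick_physical(struct pp_job *normal_head,
                                        struct pp_job *high_head)
    {
            struct pp_job *job = NULL;

            if (normal_head != NULL && !normal_head->is_virtual)
                    job = normal_head;

            /* No partially started job in the normal queue: try high pri. */
            if ((job == NULL || !job->has_started_subs) &&
                high_head != NULL && !high_head->is_virtual)
                    job = high_head;

            return job;
    }

Presumably the point of letting a started job jump the priority order is that finishing it releases all of its cores and resources at once.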
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+       struct mali_pp_job *job = NULL;
+       struct mali_pp_job *tmp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+       if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+               MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+               tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+                                              struct mali_pp_job, list);
+
+               if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+                       job = tmp_job;
+               }
+       }
+
+       if (NULL == job) {
+               if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+                       MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+                       tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+                                                      struct mali_pp_job, list);
+
+                       if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+                               job = tmp_job;
+                       }
+               }
+       }
+
+       return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+       struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+       MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+       if (NULL != job) {
+               *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+               mali_pp_job_mark_sub_job_started(job, *sub_job);
+               if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+                       /* Remove from queue when last sub job has been retrieved */
+                       mali_pp_job_list_remove(job);
+               }
+
+               job_queue_pp.depth--;
+
+               /*
+                * Job is about to start, so it is no longer
+                * possible to discard WB
+                */
+               mali_pp_job_fb_lookup_remove(job);
+       }
+
+       return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+       struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+       MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+       if (NULL != job) {
+               MALI_DEBUG_ASSERT(0 ==
+                                 mali_pp_job_get_first_unstarted_sub_job(job));
+               MALI_DEBUG_ASSERT(1 ==
+                                 mali_pp_job_get_sub_job_count(job));
+
+               mali_pp_job_mark_sub_job_started(job, 0);
+
+               mali_pp_job_list_remove(job);
+
+               job_queue_pp.depth--;
+
+               /*
+                * Job is about to start, so it is no longer
+                * possible to discard WB
+                */
+               mali_pp_job_fb_lookup_remove(job);
+       }
+
+       return job;
+}
+
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+                            mali_gp_job_get_id(job), job));
+
+       mali_scheduler_lock();
+
+       if (!mali_scheduler_queue_gp_job(job)) {
+               /* Failed to enqueue job, release job (with error) */
+
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+               mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_gp_job(job, MALI_FALSE,
+                                              MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       mali_scheduler_unlock();
+
+       return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+                            mali_pp_job_get_id(job), job));
+
+       if (MALI_TRUE == mali_timeline_tracker_activation_error(
+                   mali_pp_job_get_tracker(job))) {
+               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+                                    mali_pp_job_get_id(job), job));
+
+               mali_scheduler_lock();
+               mali_pp_job_fb_lookup_remove(job);
+               mali_pp_job_mark_unstarted_failed(job);
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+       if (mali_pp_job_needs_dma_buf_mapping(job)) {
+               mali_scheduler_deferred_pp_job_queue(job);
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+       mali_scheduler_lock();
+
+       if (!mali_scheduler_queue_pp_job(job)) {
+               /* Failed to enqueue job, release job (with error) */
+               mali_pp_job_fb_lookup_remove(job);
+               mali_pp_job_mark_unstarted_failed(job);
+               mali_scheduler_unlock();
+
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+               /* This will notify user space and close the job object */
+               mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+               return MALI_SCHEDULER_MASK_EMPTY;
+       }
+
+       mali_scheduler_unlock();
+       return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+                                   mali_bool success,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued)
+{
+       if (user_notification) {
+               mali_scheduler_return_gp_job_to_user(job, success);
+       }
+
+       if (dequeued) {
+               _mali_osk_pm_dev_ref_put();
+
+               if (mali_utilization_enabled()) {
+                       mali_utilization_gp_end();
+               }
+       }
+
+       mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+                                   u32 num_cores_in_virtual,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued)
+{
+       if (user_notification) {
+               mali_scheduler_return_pp_job_to_user(job,
+                                                    num_cores_in_virtual);
+       }
+
+       if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+               if (mali_pp_job_is_window_surface(job)) {
+                       struct mali_session_data *session;
+                       session = mali_pp_job_get_session(job);
+                       mali_session_inc_num_window_jobs(session);
+               }
+#endif
+
+               _mali_osk_pm_dev_ref_put();
+
+               if (mali_utilization_enabled()) {
+                       mali_utilization_pp_end();
+               }
+       }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+       /*
+        * The deletion of the job object (releasing sync refs etc)
+        * must be done in a different context
+        */
+       mali_scheduler_deferred_pp_job_delete(job);
+#else
+       /* no use cases need this in this configuration */
+       mali_pp_job_delete(job);
+#endif
+}
+
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+       struct mali_gp_job *gp_job;
+       struct mali_gp_job *gp_tmp;
+       struct mali_pp_job *pp_job;
+       struct mali_pp_job *pp_tmp;
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT(session->is_aborting);
+
+       MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+                            session));
+
+       mali_scheduler_lock();
+
+       /* Remove from GP normal priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+                                   struct mali_gp_job, list) {
+               if (mali_gp_job_get_session(gp_job) == session) {
+                       mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+                       job_queue_gp.depth--;
+               }
+       }
+
+       /* Remove from GP high priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+                                   struct mali_gp_job, list) {
+               if (mali_gp_job_get_session(gp_job) == session) {
+                       mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+                       job_queue_gp.depth--;
+               }
+       }
+
+       /* Remove from PP normal priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+                                   &job_queue_pp.normal_pri,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_get_session(pp_job) == session) {
+                       mali_pp_job_fb_lookup_remove(pp_job);
+
+                       job_queue_pp.depth -=
+                               mali_pp_job_unstarted_sub_job_count(
+                                       pp_job);
+                       mali_pp_job_mark_unstarted_failed(pp_job);
+
+                       if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+                               if (mali_pp_job_is_complete(pp_job)) {
+                                       mali_pp_job_list_move(pp_job,
+                                                             &removed_jobs_pp);
+                               } else {
+                                       mali_pp_job_list_remove(pp_job);
+                               }
+                       }
+               }
+       }
+
+       /* Remove from PP high priority queue */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+                                   &job_queue_pp.high_pri,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_get_session(pp_job) == session) {
+                       mali_pp_job_fb_lookup_remove(pp_job);
+
+                       job_queue_pp.depth -=
+                               mali_pp_job_unstarted_sub_job_count(
+                                       pp_job);
+                       mali_pp_job_mark_unstarted_failed(pp_job);
+
+                       if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+                               if (mali_pp_job_is_complete(pp_job)) {
+                                       mali_pp_job_list_move(pp_job,
+                                                             &removed_jobs_pp);
+                               } else {
+                                       mali_pp_job_list_remove(pp_job);
+                               }
+                       }
+               }
+       }
+
+       /*
+        * Release scheduler lock so we can release trackers
+        * (which will potentially queue new jobs)
+        */
+       mali_scheduler_unlock();
+
+       /* Release and complete all found (non-running) GP jobs */
+       _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+                                   struct mali_gp_job, list) {
+               mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+               mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+               _mali_osk_list_delinit(&gp_job->list);
+               mali_scheduler_complete_gp_job(gp_job,
+                                              MALI_FALSE, MALI_FALSE, MALI_TRUE);
+       }
+
+       /* Release and complete non-running PP jobs */
+       _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+                                   struct mali_pp_job, list) {
+               mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+               _mali_osk_list_delinit(&pp_job->list);
+               mali_scheduler_complete_pp_job(pp_job, 0,
+                                              MALI_FALSE, MALI_TRUE);
+       }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+               _mali_uk_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_gp_job *job;
+       mali_timeline_point point;
+       u32 __user *point_ptr = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)ctx;
+
+       job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+                                NULL);
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+       point = mali_scheduler_submit_gp_job(session, job);
+
+       if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+               /*
+                * Let user space know that something failed
+                * after the job was started.
+                */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+               _mali_uk_pp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       mali_timeline_point point;
+       u32 __user *point_ptr = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)ctx;
+
+       job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+       if (NULL == job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+       point = mali_scheduler_submit_pp_job(session, job);
+       job = NULL;
+
+       if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+               /*
+                * Let user space know that something failed
+                * after the job was started.
+                */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
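All three start-job entry points share the same tail: once the job has been submitted, its timeline point is written back through a user-space pointer, and a faulting write can only be reported since the job is already in flight. A minimal sketch of that write-back step using the stock kernel accessor (assuming _mali_osk_put_user maps onto put_user on Linux):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: report a 32-bit timeline point to user space.
     * By the time we get here the job is already submitted, so a fault
     * can only be reported back, not rolled back. */
    static int report_point_to_user(u32 point, u32 __user *point_ptr)
    {
            if (put_user(point, point_ptr))
                    return -EFAULT; /* parallels _MALI_OSK_ERR_ITEM_NOT_FOUND above */
            return 0;
    }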
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+               _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+       struct mali_session_data *session;
+       _mali_uk_pp_and_gp_start_job_s kargs;
+       struct mali_pp_job *pp_job;
+       struct mali_gp_job *gp_job;
+       u32 __user *point_ptr = NULL;
+       mali_timeline_point point;
+       _mali_uk_pp_start_job_s __user *pp_args;
+       _mali_uk_gp_start_job_s __user *gp_args;
+
+       MALI_DEBUG_ASSERT_POINTER(ctx);
+       MALI_DEBUG_ASSERT_POINTER(uargs);
+
+       session = (struct mali_session_data *) ctx;
+
+       if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+                                         sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+       gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+       pp_job = mali_pp_job_create(session, pp_args,
+                                   mali_scheduler_get_new_id());
+       if (NULL == pp_job) {
+               MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       gp_job = mali_gp_job_create(session, gp_args,
+                                   mali_scheduler_get_new_id(),
+                                   mali_pp_job_get_tracker(pp_job));
+       if (NULL == gp_job) {
+               MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+               mali_pp_job_delete(pp_job);
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+       /* Submit GP job. */
+       mali_scheduler_submit_gp_job(session, gp_job);
+       gp_job = NULL;
+
+       /* Submit PP job. */
+       point = mali_scheduler_submit_pp_job(session, pp_job);
+       pp_job = NULL;
+
+       if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+               /*
+                * Let user space know that something failed
+                * after the jobs were started.
+                */
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+       struct mali_session_data *session;
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       u32 fb_lookup_id;
+
+       MALI_DEBUG_ASSERT_POINTER(args);
+       MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+       mali_scheduler_lock();
+
+       /* Iterate over all jobs for the given frame builder id. */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+                                   &session->pp_job_fb_lookup_list[fb_lookup_id],
+                                   struct mali_pp_job, session_fb_lookup_list) {
+               MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+               if (mali_pp_job_get_frame_builder_id(job) !=
+                   (u32) args->fb_id) {
+                       MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+                       continue;
+               }
+
+               MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+               if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+                       mali_pp_job_disable_wb0(job);
+               }
+
+               if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+                       mali_pp_job_disable_wb1(job);
+               }
+
+               if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+                       MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+                       mali_pp_job_disable_wb2(job);
+               }
+               MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+                                    disable_mask));
+       }
+
+       mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+       int n = 0;
+
+       n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tQueue depth: %u\n", job_queue_gp.depth);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tNormal priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+                               "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tHigh priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+                               "empty" : "not empty");
+
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "PP queues\n");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tQueue depth: %u\n", job_queue_pp.depth);
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tNormal priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_pp.normal_pri)
+                               ? "empty" : "not empty");
+       n += _mali_osk_snprintf(buf + n, size - n,
+                               "\tHigh priority queue is %s\n",
+                               _mali_osk_list_empty(&job_queue_pp.high_pri)
+                               ? "empty" : "not empty");
+
+       n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+       return n;
+}
+#endif
+
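The state dump above builds its report with the usual n += snprintf(buf + n, size - n, ...) accumulation idiom, which keeps every write bounded by the remaining space. A small user-space sketch of the same idiom (plain snprintf standing in for _mali_osk_snprintf), with an explicit truncation check since snprintf returns the length it would have written:

    #include <stddef.h>
    #include <stdio.h>

    /* Append queue depths into buf without writing past size.
     * Returns the number of characters appended (like the dump's 'n'). */
    static int dump_depths(char *buf, size_t size,
                           unsigned gp_depth, unsigned pp_depth)
    {
            int n = 0;

            n += snprintf(buf + n, size - n, "GP queue depth: %u\n", gp_depth);
            if ((size_t)n >= size)
                    return n; /* truncated: size - n would underflow below */

            n += snprintf(buf + n, size - n, "PP queue depth: %u\n", pp_depth);
            return n;
    }

The driver's version relies on the buffer being large enough; the explicit check above guards the unsigned size - n arithmetic when it is not.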
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+       struct mali_session_data *session, struct mali_gp_job *job)
+{
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       /* Add job to Timeline system. */
+       point = mali_timeline_system_add_tracker(session->timeline_system,
+                       mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+       return point;
+}
+
+static mali_timeline_point mali_scheduler_submit_pp_job(
+       struct mali_session_data *session, struct mali_pp_job *job)
+{
+       mali_timeline_point point;
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       mali_scheduler_lock();
+       /*
+        * Adding job to the lookup list used to quickly discard
+        * writeback units of queued jobs.
+        */
+       mali_pp_job_fb_lookup_add(job);
+       mali_scheduler_unlock();
+
+       /* Add job to Timeline system. */
+       point = mali_timeline_system_add_tracker(session->timeline_system,
+                       mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+       return point;
+}
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+       struct mali_session_data *session;
+       _mali_osk_list_t *queue;
+
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_gp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (unlikely(session->is_aborting)) {
+               MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+                                    mali_gp_job_get_id(job), job));
+               return MALI_FALSE; /* job not queued */
+       }
+
+       mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+       /* Determine which queue the job should be added to. */
+       if (session->use_high_priority_job_queue) {
+               queue = &job_queue_gp.high_pri;
+       } else {
+               queue = &job_queue_gp.normal_pri;
+       }
+
+       job_queue_gp.depth += 1;
+
+       /* Add job to queue (mali_gp_job_list_add finds the correct place). */
+       mali_gp_job_list_add(job, queue);
+
+       /*
+        * We hold a PM reference for every job we hold queued (and running).
+        * It is important that we take this reference after the job has been
+        * added to the queue, so that any runtime resume can schedule this
+        * job right there and then.
+        */
+       _mali_osk_pm_dev_ref_get_async();
+
+       if (mali_utilization_enabled()) {
+               /*
+                * We cheat a little bit by counting the GP as busy from the
+                * time a GP job is queued. This will be fine because we only
+                * lose the tiny idle gap between jobs, but we will instead
+                * get less utilization work to do (fewer locks taken)
+                */
+               mali_utilization_gp_start();
+       }
+
+       /* Add profiling events for job enqueued */
+       _mali_osk_profiling_add_event(
+               MALI_PROFILING_EVENT_TYPE_SINGLE |
+               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+               mali_gp_job_get_pid(job),
+               mali_gp_job_get_tid(job),
+               mali_gp_job_get_frame_builder_id(job),
+               mali_gp_job_get_flush_id(job),
+               0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+                             mali_gp_job_get_id(job), "GP");
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+                            mali_gp_job_get_id(job), job));
+
+       return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+       struct mali_session_data *session;
+       _mali_osk_list_t *queue = NULL;
+
+       MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_pp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (unlikely(session->is_aborting)) {
+               MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+                                    mali_pp_job_get_id(job), job));
+               return MALI_FALSE; /* job not queued */
+       }
+
+       mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+       if (session->use_high_priority_job_queue) {
+               queue = &job_queue_pp.high_pri;
+       } else {
+               queue = &job_queue_pp.normal_pri;
+       }
+
+       job_queue_pp.depth +=
+               mali_pp_job_get_sub_job_count(job);
+
+       /* Add job to queue (mali_pp_job_list_add finds the correct place). */
+       mali_pp_job_list_add(job, queue);
+
+       /*
+        * We hold a PM reference for every job we hold queued (and running).
+        * It is important that we take this reference after the job has been
+        * added to the queue, so that any runtime resume can schedule this
+        * job right there and then.
+        */
+       _mali_osk_pm_dev_ref_get_async();
+
+       if (mali_utilization_enabled()) {
+               /*
+                * We cheat a little bit by counting the PP as busy from the
+                * time a PP job is queued. This will be fine because we only
+                * lose the tiny idle gap between jobs, but we will instead
+                * get less utilization work to do (fewer locks taken)
+                */
+               mali_utilization_pp_start();
+       }
+
+       /* Add profiling events for job enqueued */
+
+       _mali_osk_profiling_add_event(
+               MALI_PROFILING_EVENT_TYPE_SINGLE |
+               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+               MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+               mali_pp_job_get_pid(job),
+               mali_pp_job_get_tid(job),
+               mali_pp_job_get_frame_builder_id(job),
+               mali_pp_job_get_flush_id(job),
+               0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+       trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+                             mali_pp_job_get_id(job), "PP");
+#endif
+
+       MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+                            mali_pp_job_is_virtual(job)
+                            ? "Virtual" : "Physical",
+                            mali_pp_job_get_id(job), job,
+                            mali_pp_job_get_sub_job_count(job)));
+
+       return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+               mali_bool success)
+{
+       _mali_uk_gp_job_finished_s *jobres;
+       struct mali_session_data *session;
+       _mali_osk_notification_t *notification;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_gp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       notification = mali_gp_job_get_finished_notification(job);
+       MALI_DEBUG_ASSERT_POINTER(notification);
+
+       jobres = notification->result_buffer;
+       MALI_DEBUG_ASSERT_POINTER(jobres);
+
+       jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+       if (MALI_TRUE == success) {
+               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+       } else {
+               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+       }
+       jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+       jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+       jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+       mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+               u32 num_cores_in_virtual)
+{
+       u32 i;
+       u32 num_counters_to_copy;
+       _mali_uk_pp_job_finished_s *jobres;
+       struct mali_session_data *session;
+       _mali_osk_notification_t *notification;
+
+       if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+               return;
+       }
+
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       session = mali_pp_job_get_session(job);
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       notification = mali_pp_job_get_finished_notification(job);
+       MALI_DEBUG_ASSERT_POINTER(notification);
+
+       jobres = notification->result_buffer;
+       MALI_DEBUG_ASSERT_POINTER(jobres);
+
+       jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+       if (MALI_TRUE == mali_pp_job_was_success(job)) {
+               jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+       } else {
+               jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+       }
+
+       if (mali_pp_job_is_virtual(job)) {
+               num_counters_to_copy = num_cores_in_virtual;
+       } else {
+               num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+       }
+
+       for (i = 0; i < num_counters_to_copy; i++) {
+               jobres->perf_counter0[i] =
+                       mali_pp_job_get_perf_counter_value0(job, i);
+               jobres->perf_counter1[i] =
+                       mali_pp_job_get_perf_counter_value1(job, i);
+               jobres->perf_counter_src0 =
+                       mali_pp_job_get_pp_counter_global_src0();
+               jobres->perf_counter_src1 =
+                       mali_pp_job_get_pp_counter_global_src1();
+       }
+
+       mali_session_send_notification(session, notification);
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+       mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+       _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+static void mali_scheduler_do_pp_job_delete(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+
+       MALI_IGNORE(arg);
+
+       /*
+        * Quickly "unhook" the jobs pending to be deleted, so we can release
+        * the lock before we start deleting the job objects
+        * (without any locks held)
+        */
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+       _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+
+               _mali_osk_list_delinit(&job->list);
+               mali_pp_job_delete(job); /* delete the job object itself */
+       }
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
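The deferred-delete machinery above is a standard kernel pattern: producers push objects onto a spinlock-protected list from any context and kick a work item, and the worker splices the list out under the lock before tearing the objects down in process context with no locks held. A generic sketch with stock kernel primitives (hypothetical item type; the _mali_osk_* wrappers used above are assumed to map onto these on Linux):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct item {
            struct list_head node;
            /* ... payload whose teardown may sleep ... */
    };

    static LIST_HEAD(deletion_queue);
    static DEFINE_SPINLOCK(deletion_lock);

    static void do_deferred_free(struct work_struct *work)
    {
            struct item *it, *tmp;
            unsigned long flags;
            LIST_HEAD(local);

            /* Quickly unhook everything under the lock... */
            spin_lock_irqsave(&deletion_lock, flags);
            list_splice_init(&deletion_queue, &local);
            spin_unlock_irqrestore(&deletion_lock, flags);

            /* ...then tear down in process context, no locks held. */
            list_for_each_entry_safe(it, tmp, &local, node) {
                    list_del(&it->node);
                    kfree(it);
            }
    }

    static DECLARE_WORK(deletion_work, do_deferred_free);

    /* Safe to call from atomic context: just enqueue and schedule. */
    static void deferred_free(struct item *it)
    {
            unsigned long flags;

            spin_lock_irqsave(&deletion_lock, flags);
            list_add_tail(&it->node, &deletion_queue);
            spin_unlock_irqrestore(&deletion_lock, flags);

            schedule_work(&deletion_work);
    }

The splice-out step is the same trick the deferred queue path below uses: hold the spinlock only long enough to move the whole list, so the slow per-object work never runs under it.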
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+       MALI_DEBUG_ASSERT_POINTER(job);
+
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+       mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+       _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+       _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+       struct mali_pp_job *job;
+       struct mali_pp_job *tmp;
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+       MALI_IGNORE(arg);
+
+       /*
+        * Quickly "unhook" the jobs pending to be queued, so we can release
+        * the lock before we start queueing the job objects
+        * (without any locks held)
+        */
+       _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+       _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+       _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+       /* First loop through all jobs and do the pre-work (no locks needed) */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+               if (mali_pp_job_needs_dma_buf_mapping(job)) {
+                       /*
+                        * This operation could fail, but we continue anyway,
+                        * because the worst that could happen is that this
+                        * job will fail due to a Mali page fault.
+                        */
+                       mali_dma_buf_map_job(job);
                }
        }
 
-       if (MALI_SCHEDULER_MASK_PP & mask) {
-               /* PP needs scheduling. */
-               if (deferred_schedule) {
-                       /* Schedule PP deferred. */
-                       _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+       mali_scheduler_lock();
+
+       /* Then loop through all jobs again to queue them (lock needed) */
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+                                   struct mali_pp_job, list) {
+
+               /* Remove from scheduler_pp_job_queue_list before queueing */
+               mali_pp_job_list_remove(job);
+
+               if (mali_scheduler_queue_pp_job(job)) {
+                       /* Job queued successfully */
+                       schedule_mask |= MALI_SCHEDULER_MASK_PP;
                } else {
-                       /* Schedule PP now. */
-                       mali_pp_scheduler_schedule();
+                       /* Failed to enqueue job, release job (with error) */
+                       mali_pp_job_fb_lookup_remove(job);
+                       mali_pp_job_mark_unstarted_failed(job);
+
+                       /* unlock scheduler in this uncommon case */
+                       mali_scheduler_unlock();
+
+                       schedule_mask |= mali_timeline_tracker_release(
+                                                mali_pp_job_get_tracker(job));
+
+                       /* Notify user space and close the job object */
+                       mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+                                                      MALI_FALSE);
+
+                       mali_scheduler_lock();
+               }
+       }
+
+       mali_scheduler_unlock();
+
+       /* Trigger scheduling of jobs */
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+void mali_scheduler_gp_pp_job_queue_print(void)
+{
+       struct mali_gp_job *gp_job = NULL;
+       struct mali_gp_job *tmp_gp_job = NULL;
+       struct mali_pp_job *pp_job = NULL;
+       struct mali_pp_job *tmp_pp_job = NULL;
+
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+       MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+       /* dump job queue status */
+       if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
+               MALI_PRINT(("No GP or PP jobs in the job queue.\n"));
+               return;
+       }
+
+       MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth));
+       if (job_queue_gp.depth > 0) {
+               if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
+                                                   struct mali_gp_job, list) {
+                               MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+                       }
+               }
+
+               if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri,
+                                                   struct mali_gp_job, list) {
+                               MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+                       }
                }
        }
+
+       MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth));
+       if (job_queue_pp.depth > 0) {
+               if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri,
+                                                   struct mali_pp_job, list) {
+                               if (mali_pp_job_is_virtual(pp_job)) {
+                                       MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               } else {
+                                       MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               }
+                       }
+               }
+
+               if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+                       _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri,
+                                                   struct mali_pp_job, list) {
+                               if (mali_pp_job_is_virtual(pp_job)) {
+                                       MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               } else {
+                                       MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+                               }
+                       }
+               }
+       }
+
+       /* dump group running job status */
+       mali_executor_running_status_print();
 }
index 07252004b04af40358406f39d5b6785f34d7cde8..ac8596a80f09f31d6d083d1d7b5ea1042aa62e70 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define __MALI_SCHEDULER_H__
 
 #include "mali_osk.h"
+#include "mali_osk_list.h"
 #include "mali_scheduler_types.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+       _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+       _MALI_OSK_LIST_HEAD(high_pri);   /* Queued jobs with high priority */
+       u32 depth;                       /* Depth of combined queues. */
+};
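Jobs land on one of the two lists according to session priority, and depth always reflects the sum of both lists. The enqueue helpers themselves (mali_scheduler_queue_pp_job and friends) are not part of this hunk, so the following is only a hedged sketch of the bookkeeping they presumably perform:

/* Illustrative only: link a job into the queue and keep the combined
 * depth in sync. Assumes the scheduler lock is already held. */
static void job_queue_add(struct mali_scheduler_job_queue *queue,
                          _mali_osk_list_t *job_link, mali_bool high_pri)
{
        MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();

        if (MALI_TRUE == high_pri) {
                _mali_osk_list_addtail(job_link, &queue->high_pri);
        } else {
                _mali_osk_list_addtail(job_link, &queue->normal_pri);
        }
        queue->depth++;
}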
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj)
 
 _mali_osk_errcode_t mali_scheduler_initialize(void);
 void mali_scheduler_terminate(void);
 
-u32 mali_scheduler_get_new_id(void);
-u32 mali_scheduler_get_new_cache_order(void);
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+       _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+       MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
 
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by the both the PP and GP scheuduler.
- * This must be called after the Mali HW has been powered on in order to reset
- * the HW.
- */
-MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
 {
-       mali_gp_scheduler_reset_all_groups();
-       mali_pp_scheduler_reset_all_groups();
+       MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+       _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
 }
 
-/**
- * @brief Zap TLB on all active groups running \a session
- *
- * @param session Pointer to the session to zap
- */
-MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
+{
+       return job_queue_gp.depth;
+}
+
+u32 mali_scheduler_job_physical_head_count(void);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+       return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
 {
-       mali_gp_scheduler_zap_all_active(session);
-       mali_pp_scheduler_zap_all_active(session);
+       return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
 }
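Both generators hand out identifiers with a single atomic increment, so concurrent submitters never collide and no lock is required; wraparound is benign as long as consumers compare ids with modular arithmetic. A standalone C11 equivalent of the pattern (names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast32_t job_id_autonumber;

static uint32_t get_new_id(void)
{
        /* atomic_fetch_add returns the previous value; adding 1 mirrors
         * _mali_osk_atomic_inc_return(), which returns the new value. */
        return (uint32_t)atomic_fetch_add(&job_id_autonumber, 1) + 1;
}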
 
 /**
- * Check if bit is set in scheduler mask.
+ * @brief Used by the Timeline system to queue a GP job.
  *
- * @param mask Scheduler mask to check.
- * @param bit Bit to check.
- * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
  */
-MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
-{
-       return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
-}
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
 
 /**
- * Schedule GP and PP according to bitmask.
+ * @brief Used by the Timeline system to queue a PP job.
  *
- * @param mask A scheduling bitmask.
- * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
  */
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
 
-/* Enable or disable scheduler hint. */
-extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+                                   mali_bool success,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued);
 
-MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
-{
-       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
-       mali_scheduler_hints[hint] = MALI_TRUE;
-}
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+                                   u32 num_cores_in_virtual,
+                                   mali_bool user_notification,
+                                   mali_bool dequeued);
 
-MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
-{
-       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
-       mali_scheduler_hints[hint] = MALI_FALSE;
-}
+void mali_scheduler_abort_session(struct mali_session_data *session);
 
-MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
-{
-       MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
-       return mali_scheduler_hints[hint];
-}
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
+
+void mali_scheduler_gp_pp_job_queue_print(void);
 
 #endif /* __MALI_SCHEDULER_H__ */
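Taken together, the lock inlines and the peek/get accessors define the consumer contract of this header: take the scheduler lock, inspect or pop queued jobs, release the lock. A hedged sketch of a caller (the real consumers live in mali_executor.c, which is not part of this hunk):

static void try_schedule_one_gp_job(void)
{
        mali_scheduler_lock();
        if (0 < mali_scheduler_job_gp_count()) {
                struct mali_gp_job *gp_job = mali_scheduler_job_gp_get();

                /* ...hand gp_job to the executor / start it on a group... */
        }
        mali_scheduler_unlock();
}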
index 5cbf598bfdce83f270bc3f699f0afa8e45023379..b4aecd5711f03d9dc537c3fc11f662a37b17b87d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -26,9 +26,4 @@ typedef u32 mali_scheduler_mask;
 #define MALI_SCHEDULER_MASK_EMPTY 0
 #define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
 
-typedef enum {
-       MALI_SCHEDULER_HINT_GP_BOUND = 0
-#define MALI_SCHEDULER_HINT_MAX        1
-} mali_scheduler_hint;
-
 #endif /* __MALI_SCHEDULER_TYPES_H__ */
index 852f1e1953929d79f30382b0fdbe6c042f17f737..000f40f6a9572f90d83036c429b7280402133773 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_osk.h"
 #include "mali_osk_list.h"
 #include "mali_session.h"
+#include "mali_ukk.h"
 
 _MALI_OSK_LIST_HEAD(mali_sessions);
 static u32 mali_session_count = 0;
 
-_mali_osk_spinlock_irq_t *mali_sessions_lock;
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
 
 _mali_osk_errcode_t mali_session_initialize(void)
 {
        _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
 
-       mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
-
-       if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+       mali_sessions_lock = _mali_osk_spinlock_irq_init(
+                                    _MALI_OSK_LOCKFLAG_ORDERED,
+                                    _MALI_OSK_LOCK_ORDER_SESSIONS);
+       if (NULL == mali_sessions_lock) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
 
        return _MALI_OSK_ERR_OK;
 }
 
 void mali_session_terminate(void)
 {
-       _mali_osk_spinlock_irq_term(mali_sessions_lock);
+       if (NULL != mali_sessions_lock) {
+               _mali_osk_spinlock_irq_term(mali_sessions_lock);
+               mali_sessions_lock = NULL;
+       }
 }
 
 void mali_session_add(struct mali_session_data *session)
@@ -58,7 +65,7 @@ u32 mali_session_get_count(void)
  * Get the max completed window jobs from all active session,
  * which will be used in window render frame per sec calculate
  */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
 u32 mali_session_max_window_num(void)
 {
        struct mali_session_data *session, *tmp;
@@ -68,7 +75,8 @@ u32 mali_session_max_window_num(void)
        mali_session_lock();
 
        MALI_SESSION_FOREACH(session, tmp, link) {
-               tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+               tmp_number = _mali_osk_atomic_xchg(
+                                    &session->number_of_window_jobs, 0);
                if (max_window_num < tmp_number) {
                        max_window_num = tmp_number;
                }
@@ -79,3 +87,27 @@ u32 mali_session_max_window_num(void)
        return max_window_num;
 }
 #endif
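The sampling loop above depends on _mali_osk_atomic_xchg(..., 0) reading each session's window-job count and resetting it in one atomic step, so completions that race with the read are never lost or double counted across periods. The same read-and-reset idiom in standalone C11 (illustrative):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast32_t number_of_window_jobs;

/* Completion path: count one finished window job. */
static void window_job_completed(void)
{
        atomic_fetch_add(&number_of_window_jobs, 1);
}

/* Sampling path: fetch this period's count and restart from zero. */
static uint32_t sample_window_jobs(void)
{
        return (uint32_t)atomic_exchange(&number_of_window_jobs, 0);
}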
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+       struct mali_session_data *session, *tmp;
+       u32 mali_mem_usage;
+       u32 total_mali_mem_size;
+
+       MALI_DEBUG_ASSERT_POINTER(print_ctx);
+       mali_session_lock();
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               _mali_osk_ctxprintf(print_ctx, "  %-25s  %-10u  %-10u  %-15u  %-15u  %-10u  %-10u\n",
+                                   session->comm, session->pid,
+                                   (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE,
+                                   session->max_mali_mem_allocated_size,
+                                   (atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE,
+                                   (atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE,
+                                   (atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE
+                                  );
+       }
+       mali_session_unlock();
+       mali_mem_usage  = _mali_ukk_report_memory_usage();
+       total_mali_mem_size = _mali_ukk_report_total_memory_size();
+       _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+}
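Each session line above carries seven columns: comm, pid, current allocation, peak allocation, and the external/UMP/dma-buf breakdowns (byte values throughout). The matching header row is printed by the caller, which is outside this diff; a hedged sketch of what it might look like, with the column labels assumed rather than taken from the source:

_mali_osk_ctxprintf(print_ctx,
                    "  %-25s  %-10s  %-10s  %-15s  %-15s  %-10s  %-10s\n",
                    "comm", "pid", "allocated", "max_allocated",
                    "external", "ump", "dma_buf");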
index ef8d5f5c9c89b4b80ac1daaead516bf3fa9a08ad..59305be0fa245f4621005cbbd169b818f9957e6b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define __MALI_SESSION_H__
 
 #include "mali_mmu_page_directory.h"
-#include "mali_kernel_descriptor_mapping.h"
 #include "mali_osk.h"
 #include "mali_osk_list.h"
+#include "mali_memory_types.h"
+#include "mali_memory_manager.h"
 
 struct mali_timeline_system;
 struct mali_soft_system;
@@ -24,18 +25,18 @@ struct mali_soft_system;
 #define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
 
 struct mali_session_data {
-       _mali_osk_notification_queue_t * ioctl_queue;
+       _mali_osk_notification_queue_t *ioctl_queue;
 
        _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
-       mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+#if 0
        _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
-
+#endif
        struct mali_page_directory *page_directory; /**< MMU page directory for this session */
 
        _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
        _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
 
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
        _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
 #endif
 
@@ -46,6 +47,13 @@ struct mali_session_data {
 
        mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
        mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+       u32 pid;
+       char *comm;
+       atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */
+       atomic_t mali_mem_allocated_pages; /**< The currently allocated Mali memory pages, including Mali OS memory and Mali dedicated memory. */
+       size_t max_mali_mem_allocated_size; /**< The maximum Mali memory allocated so far, including Mali OS memory and Mali dedicated memory. */
+       /* Added for the new memory system */
+       struct mali_allocation_manager allocation_mgr;
 };
 
 _mali_osk_errcode_t mali_session_initialize(void);
@@ -78,17 +86,39 @@ MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(s
        return session->page_directory;
 }
 
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_mutex_signal(session->memory_lock);
+}
+
 MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
 {
        _mali_osk_notification_queue_send(session->ioctl_queue, object);
 }
 
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+       _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
 /*
  * Get the max completed window jobs from all active sessions,
  * which will be used in the window render frames-per-second calculation
  */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
 u32 mali_session_max_window_num(void);
+
 #endif
 
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
 #endif /* __MALI_SESSION_H__ */
index b2bb8e389a92c91af810f6a6859bfb7a5acea626..c0781bfe88746d7de323c92d450224bc6f9d26d4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include "mali_soft_job.h"
 #include "mali_osk.h"
-#include "mali_osk_mali.h"
 #include "mali_timeline.h"
 #include "mali_session.h"
 #include "mali_kernel_common.h"
 #include "mali_uk_types.h"
 #include "mali_scheduler.h"
+#include "mali_executor.h"
 
 MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
 {
@@ -48,9 +48,7 @@ MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_
 
 struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
 {
-       u32 i;
        struct mali_soft_job_system *system;
-       struct mali_soft_job *job;
 
        MALI_DEBUG_ASSERT_POINTER(session);
 
@@ -67,18 +65,10 @@ struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_dat
                return NULL;
        }
        system->lock_owner = 0;
+       system->last_job_id = 0;
 
-       _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
        _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
 
-       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
-               job = &(system->jobs[i]);
-               _mali_osk_list_add(&(job->system_list), &(system->jobs_free));
-               job->system = system;
-               job->state = MALI_SOFT_JOB_STATE_FREE;
-               job->id = i;
-       }
-
        return system;
 }
 
@@ -87,16 +77,7 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
        MALI_DEBUG_ASSERT_POINTER(system);
 
        /* All jobs should be free at this point. */
-       MALI_DEBUG_CODE( {
-               u32 i;
-               struct mali_soft_job *job;
-
-               for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i)
-               {
-                       job = &(system->jobs[i]);
-                       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
-               }
-       });
+       MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
 
        if (NULL != system) {
                if (NULL != system->lock) {
@@ -106,31 +87,6 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
        }
 }
 
-static struct mali_soft_job *mali_soft_job_system_alloc_job(struct mali_soft_job_system *system)
-{
-       struct mali_soft_job *job;
-
-       MALI_DEBUG_ASSERT_POINTER(system);
-       MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
-
-       if (_mali_osk_list_empty(&(system->jobs_free))) {
-               /* No jobs available. */
-               return NULL;
-       }
-
-       /* Grab first job and move it to the used list. */
-       job = _MALI_OSK_LIST_ENTRY(system->jobs_free.next, struct mali_soft_job, system_list);
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE == job->state);
-
-       _mali_osk_list_move(&(job->system_list), &(system->jobs_used));
-       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
-
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
-       MALI_DEBUG_ASSERT(system == job->system);
-
-       return job;
-}
-
 static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
 {
        MALI_DEBUG_ASSERT_POINTER(job);
@@ -138,23 +94,26 @@ static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, s
 
        mali_soft_job_system_lock(job->system);
 
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE != job->state);
        MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
        MALI_DEBUG_ASSERT(system == job->system);
 
-       job->state = MALI_SOFT_JOB_STATE_FREE;
-       _mali_osk_list_move(&(job->system_list), &(system->jobs_free));
+       _mali_osk_list_del(&(job->system_list));
 
        mali_soft_job_system_unlock(job->system);
+
+       _mali_osk_free(job);
 }
 
 MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
 {
+       struct mali_soft_job *job, *tmp;
+
        MALI_DEBUG_ASSERT_POINTER(system);
        MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
 
-       if (job_id < MALI_MAX_NUM_SOFT_JOBS) {
-               return &system->jobs[job_id];
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               if (job->id == job_id)
+                       return job;
        }
 
        return NULL;
@@ -181,39 +140,40 @@ void mali_soft_job_destroy(struct mali_soft_job *job)
        }
 }
 
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job)
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
 {
        struct mali_soft_job *job;
        _mali_osk_notification_t *notification = NULL;
 
        MALI_DEBUG_ASSERT_POINTER(system);
-       MALI_DEBUG_ASSERT(MALI_SOFT_JOB_TYPE_USER_SIGNALED >= type);
+       MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+                         (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
-               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
-               if (unlikely(NULL == notification)) {
-                       MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
-                       return NULL;
-               }
+       notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+       if (unlikely(NULL == notification)) {
+               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+               return NULL;
        }
 
-       mali_soft_job_system_lock(system);
-
-       job = mali_soft_job_system_alloc_job(system);
-       if (NULL == job) {
-               mali_soft_job_system_unlock(system);
-               MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate job"));
-               _mali_osk_notification_delete(notification);
+       job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+       if (unlikely(NULL == job)) {
+               MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed.\n"));
+               /* Do not leak the notification allocated above. */
+               _mali_osk_notification_delete(notification);
+               return NULL;
+       }
 
+       mali_soft_job_system_lock(system);
+
+       job->system = system;
+       job->id = system->last_job_id++;
+       job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+       _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
        job->type = type;
        job->user_job = user_job;
        job->activated = MALI_FALSE;
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) {
-               job->activated_notification = notification;
-       }
+       job->activated_notification = notification;
 
        _mali_osk_atomic_init(&job->refcount, 1);
 
@@ -277,7 +237,8 @@ _mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system
 
        job = mali_soft_job_system_lookup_job(system, job_id);
 
-       if (NULL == job || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+       if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+           || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
                mali_soft_job_system_unlock(system);
                MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
                return _MALI_OSK_ERR_ITEM_NOT_FOUND;
@@ -311,7 +272,7 @@ _mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system
        MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
 
        schedule_mask = mali_timeline_tracker_release(&job->tracker);
-       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
 
        mali_soft_job_destroy(job);
 
@@ -328,8 +289,10 @@ static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
        job->activated_notification = NULL;
 }
 
-void mali_soft_job_system_activate_job(struct mali_soft_job *job)
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
 {
+       mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
        MALI_DEBUG_ASSERT_POINTER(job);
        MALI_DEBUG_ASSERT_POINTER(job->system);
        MALI_DEBUG_ASSERT_POINTER(job->system->session);
@@ -346,7 +309,7 @@ void mali_soft_job_system_activate_job(struct mali_soft_job *job)
                /* Since we are in shutdown, we can ignore the scheduling bitmask. */
                mali_timeline_tracker_release(&job->tracker);
                mali_soft_job_destroy(job);
-               return;
+               return schedule_mask;
        }
 
        /* Send activated notification. */
@@ -354,9 +317,24 @@ void mali_soft_job_system_activate_job(struct mali_soft_job *job)
 
        /* Wake up sleeping signaler. */
        job->activated = MALI_TRUE;
-       _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
 
-       mali_soft_job_system_unlock(job->system);
+       /* If the job is self-signaled, release the tracker, destroy the job, and schedule at once */
+       if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+               job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+               mali_soft_job_system_unlock(job->system);
+
+               schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+               mali_soft_job_destroy(job);
+       } else {
+               _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+               mali_soft_job_system_unlock(job->system);
+       }
+
+       return schedule_mask;
 }
 
 mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
@@ -373,7 +351,7 @@ mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
        mali_soft_job_system_lock(job->system);
 
        MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED  == job->state ||
-                         MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+                         MALI_SOFT_JOB_STATE_SIGNALED == job->state);
 
        if (unlikely(job->system->session->is_aborting)) {
                /* The session is aborting.  This job will be released and destroyed by @ref
@@ -408,7 +386,6 @@ mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
 
 void mali_soft_job_system_abort(struct mali_soft_job_system *system)
 {
-       u32 i;
        struct mali_soft_job *job, *tmp;
        _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
 
@@ -420,12 +397,9 @@ void mali_soft_job_system_abort(struct mali_soft_job_system *system)
 
        mali_soft_job_system_lock(system);
 
-       for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
-               job = &(system->jobs[i]);
-
-               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_FREE      == job->state ||
-                                 MALI_SOFT_JOB_STATE_STARTED   == job->state ||
-                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+       _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+               MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED   == job->state ||
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
 
                if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
                        /* If the job has been activated, we have to release the tracker and destroy
@@ -450,7 +424,7 @@ void mali_soft_job_system_abort(struct mali_soft_job_system *system)
        /* Release and destroy jobs. */
        _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
                MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED  == job->state ||
-                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+                                 MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
 
                if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
                        mali_timeline_tracker_release(&job->tracker);
index 1a50dc168add38c9198b195fb598e3f7145fd6bc..19f780ca3b8839163d49184f5d8773a024c57403 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -26,17 +26,18 @@ struct mali_soft_job_system;
  * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
  * they are signaled by user-space (@ref mali_soft_job_system_signaled_job) or if they are timed out
  * by the Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release their job resources
+ * automatically in the kernel when the job is activated.
  */
 typedef enum mali_soft_job_type {
+       MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
        MALI_SOFT_JOB_TYPE_USER_SIGNALED,
 } mali_soft_job_type;
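The two types differ only in who completes the job: a self-signaled job is finished by the kernel as soon as the Timeline activates it, while a user-signaled job stays started until user space signals it or the Timeline times it out. A hedged lifecycle sketch using only the calls visible in this diff (the timeline attach/start steps in between are elided, and user_job_id is a made-up parameter):

/* Illustrative only; error handling and the timeline attach/start
 * steps are omitted. */
static void soft_job_example(struct mali_soft_job_system *system,
                             u64 user_job_id)
{
        struct mali_soft_job *job;

        /* Self-signaled: the kernel releases it on activation. */
        job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
                                   user_job_id);
        /* ...start the job; activation destroys it automatically... */

        /* User-signaled: user space completes it via the ioctl path. */
        job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_USER_SIGNALED,
                                   user_job_id);
        /* ...start the job; later, from the signal ioctl: */
        mali_soft_job_system_signal_job(system, job->id);
}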
 
 /**
  * Soft job state.
  *
- * All soft jobs in a soft job system will initially be in state MALI_SOFT_JOB_STATE_FREE.  On @ref
- * mali_soft_job_system_start_job a job will first be allocated.  A job in state
- * MALI_SOFT_JOB_STATE_FREE will be picked and the state changed to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * On @ref mali_soft_job_system_start_job a job is first allocated and its state set to MALI_SOFT_JOB_STATE_ALLOCATED.
  * Once the job is added to the timeline system, the state changes to MALI_SOFT_JOB_STATE_STARTED.
  *
  * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
@@ -47,11 +48,8 @@ typedef enum mali_soft_job_type {
  * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT.  This can only happen to soft jobs in state
  * MALI_SOFT_JOB_STATE_STARTED.
  *
- * When a soft job's reference count reaches zero, it will be freed and the state returns to
- * MALI_SOFT_JOB_STATE_FREE.
  */
 typedef enum mali_soft_job_state {
-       MALI_SOFT_JOB_STATE_FREE,
        MALI_SOFT_JOB_STATE_ALLOCATED,
        MALI_SOFT_JOB_STATE_STARTED,
        MALI_SOFT_JOB_STATE_SIGNALED,
@@ -60,9 +58,6 @@ typedef enum mali_soft_job_state {
 
 #define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
 
-/* Maximum number of soft jobs per soft system. */
-#define MALI_MAX_NUM_SOFT_JOBS 20
-
 /**
  * Soft job struct.
  *
@@ -70,7 +65,7 @@ typedef enum mali_soft_job_state {
  */
 typedef struct mali_soft_job {
        mali_soft_job_type            type;                   /**< Soft job type.  Must be one of MALI_SOFT_JOB_TYPE_*. */
-       u32                           user_job;               /**< Identifier for soft job in user space. */
+       u64                           user_job;               /**< Identifier for soft job in user space. */
        _mali_osk_atomic_t            refcount;               /**< Soft jobs are reference counted to prevent premature deletion. */
        struct mali_timeline_tracker  tracker;                /**< Timeline tracker for soft job. */
        mali_bool                     activated;              /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
@@ -90,13 +85,11 @@ typedef struct mali_soft_job {
  */
 typedef struct mali_soft_job_system {
        struct mali_session_data *session;                    /**< The session this soft job system belongs to. */
-
-       struct mali_soft_job jobs[MALI_MAX_NUM_SOFT_JOBS];    /**< Array of all soft jobs in this system. */
-       _MALI_OSK_LIST_HEAD(jobs_free);                       /**< List of all free soft jobs. */
        _MALI_OSK_LIST_HEAD(jobs_used);                       /**< List of all allocated soft jobs. */
 
        _mali_osk_spinlock_irq_t *lock;                       /**< Lock used to protect soft job system and its soft jobs. */
        u32 lock_owner;                                       /**< Contains tid of thread that locked the system or 0, if not locked. */
+       u32 last_job_id;                                      /**< Records the last job id; protected by the lock. */
 } mali_soft_job_system;
 
 /**
@@ -125,7 +118,7 @@ void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
  * @param user_job Identifier for soft job in user space.
  * @return New soft job if successful, NULL if not.
  */
-struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u32 user_job);
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
 
 /**
  * Destroy soft job.
@@ -172,8 +165,9 @@ _mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system
  * Used by the Timeline system to activate a soft job.
  *
  * @param job The soft job that is being activated.
+ * @return A scheduling bitmask.
  */
-void mali_soft_job_system_activate_job(struct mali_soft_job *job);
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);
 
 /**
  * Used by the Timeline system to timeout a soft job.
index e11feee30b8788357b9c681e151b9a1db45d7ef6..0af38bc71a61ef3c6af65bc1631f418f4c4a0b61 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 2dc7ab98670253d7bd703dfdc1204f3c044bec87..9800113b8ee827e0c8e26fc19131045d54743c2e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 651249fc20c7d113796d8ee4147ea561e7625f3a..cb370dfbe0f0a6e5639484427a1173a8fb44e6b0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include "mali_timeline.h"
 #include "mali_kernel_common.h"
-#include "mali_osk_mali.h"
 #include "mali_scheduler.h"
 #include "mali_soft_job.h"
 #include "mali_timeline_fence_wait.h"
 #include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
 
 #define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
 
-static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
-        struct mali_timeline_waiter *waiter);
+/*
+ * The following three counters record how many GP, physical PP
+ * and virtual PP jobs are delayed in the whole timeline system.
+ * They are used to decide whether idle groups need to be
+ * deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
 
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+               struct mali_timeline_waiter *waiter);
 
 #if defined(CONFIG_SYNC)
-
-/*
-   Older versions of Linux, before 3.5, doesn't support fput() in interrupt
-   context. For those older kernels, allocate a list object and put the
-   fence object on that and defer the call to sync_fence_put() to a
-   workqueue.
-*/
 #include <linux/version.h>
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
-#include <linux/llist.h>
+#include <linux/list.h>
 #include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 struct mali_deferred_fence_put_entry {
-       struct llist_node list;
+       struct hlist_node list;
        struct sync_fence *fence;
 };
 
-static LLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
 
 static void put_sync_fences(struct work_struct *ignore)
 {
-       struct llist_node *list;
+       struct hlist_head list;
+       struct hlist_node *tmp, *pos;
+       unsigned long flags;
+       struct mali_deferred_fence_put_entry *o;
 
-       list = llist_del_all(&mali_timeline_sync_fence_to_free_list);
-
-       while(list)
-       {
-               struct mali_deferred_fence_put_entry *o;
-
-               o = llist_entry(list, struct mali_deferred_fence_put_entry, list);
-               list = llist_next(list);
+       spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+       hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+       spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
 
+       hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
                sync_fence_put(o->fence);
                kfree(o);
        }
@@ -63,7 +67,6 @@ static void put_sync_fences(struct work_struct *ignore)
 static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
 
-
 /* Callback that is called when a sync fence a tracker is waiting on is signaled. */
 static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
 {
@@ -73,7 +76,11 @@ static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, str
        mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
        u32 tid = _mali_osk_get_tid();
        mali_bool is_aborting = MALI_FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        int fence_status = sync_fence->status;
+#else
+       int fence_status = atomic_read(&sync_fence->status);
+#endif
 
        MALI_DEBUG_ASSERT_POINTER(sync_fence);
        MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
@@ -97,6 +104,8 @@ static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, str
        MALI_DEBUG_ASSERT_POINTER(waiter);
 
        tracker->sync_fence = NULL;
+       tracker->fence.sync_fd = -1;
+
        schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
 
        /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
@@ -105,25 +114,39 @@ static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, str
        }
 
        mali_spinlock_reentrant_signal(system->spinlock, tid);
-       
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)   
+
+       /*
+        * Older versions of Linux, before 3.5, don't support fput() in interrupt
+        * context. For those older kernels, allocate a list entry, put the
+        * fence object on it, and defer the call to sync_fence_put() to a workqueue.
+        */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
        {
                struct mali_deferred_fence_put_entry *obj;
 
                obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
                if (obj) {
+                       unsigned long flags;
+                       mali_bool schedule = MALI_FALSE;
+
                        obj->fence = sync_fence;
-                       if (llist_add(&obj->list, &mali_timeline_sync_fence_to_free_list)) {
+
+                       spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+                       if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+                               schedule = MALI_TRUE;
+                       hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+                       spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+                       if (schedule)
                                schedule_delayed_work(&delayed_sync_fence_put, 0);
-                       }
                }
        }
-#else/* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */     
-   sync_fence_put(sync_fence);
-#endif
+#else
+       sync_fence_put(sync_fence);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
 
        if (!is_aborting) {
-               mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+               mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
        }
 }
 #endif /* defined(CONFIG_SYNC) */
@@ -175,7 +198,7 @@ static void mali_timeline_timer_callback(void *data)
 
        mali_spinlock_reentrant_signal(system->spinlock, tid);
 
-       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
 }
 
 void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
@@ -225,7 +248,9 @@ static void mali_timeline_destroy(struct mali_timeline *timeline)
                        _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
                }
 
+#ifndef CONFIG_SYNC
                _mali_osk_free(timeline);
+#endif
        }
 }
 
@@ -283,11 +308,19 @@ static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *s
                        return NULL;
                }
 
-               timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+               timeline->destroyed = MALI_FALSE;
+
+               timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
                if (NULL == timeline->sync_tl) {
                        mali_timeline_destroy(timeline);
                        return NULL;
                }
+
+               timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+               if (NULL == timeline->spinlock) {
+                       mali_timeline_destroy(timeline);
+                       return NULL;
+               }
        }
 #endif /* defined(CONFIG_SYNC) */
 
@@ -316,6 +349,16 @@ static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct
 
        MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
 
+       if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+               _mali_osk_atomic_inc(&gp_tracker_count);
+       } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+               if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+                       _mali_osk_atomic_inc(&virt_pp_tracker_count);
+               } else {
+                       _mali_osk_atomic_inc(&phy_pp_tracker_count);
+               }
+       }
+
        /* Add tracker as new head on timeline's tracker list. */
        if (NULL == timeline->tracker_head) {
                /* Tracker list is empty. */
@@ -347,7 +390,7 @@ static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct m
 
        /* Waiter time must be between timeline head and tail, and there must
         * be less than MALI_TIMELINE_MAX_POINT_SPAN elements between */
-       MALI_DEBUG_ASSERT(( waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+       MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
        MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
 
        /* Finding out where to put this waiter, in the linked waiter list of the given timeline **/
@@ -471,9 +514,9 @@ static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timelin
 }
 
 void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
-                                mali_timeline_tracker_type type,
-                                struct mali_timeline_fence *fence,
-                                void *job)
+                               mali_timeline_tracker_type type,
+                               struct mali_timeline_fence *fence,
+                               void *job)
 {
        MALI_DEBUG_ASSERT_POINTER(tracker);
        MALI_DEBUG_ASSERT_POINTER(job);
@@ -531,7 +574,7 @@ mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *
 
        /* Tracker should still be on timeline */
        MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
-       MALI_DEBUG_ASSERT( mali_timeline_is_point_on(timeline, tracker->point));
+       MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
 
        /* Tracker is no longer valid. */
        MALI_DEBUG_CODE(tracker->magic = 0);
@@ -573,8 +616,8 @@ mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *
 }
 
 void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
-        struct mali_timeline_waiter *tail,
-        struct mali_timeline_waiter *head)
+               struct mali_timeline_waiter *tail,
+               struct mali_timeline_waiter *head)
 {
        MALI_DEBUG_ASSERT_POINTER(system);
        MALI_DEBUG_ASSERT_POINTER(head);
@@ -609,16 +652,23 @@ static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_t
 
        switch (tracker->type) {
        case MALI_TIMELINE_TRACKER_GP:
-               schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+               schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+               _mali_osk_atomic_dec(&gp_tracker_count);
                break;
        case MALI_TIMELINE_TRACKER_PP:
-               schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+               if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+                       _mali_osk_atomic_dec(&virt_pp_tracker_count);
+               } else {
+                       _mali_osk_atomic_dec(&phy_pp_tracker_count);
+               }
+               schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
                break;
        case MALI_TIMELINE_TRACKER_SOFT:
                timeline = tracker->timeline;
                MALI_DEBUG_ASSERT_POINTER(timeline);
 
-               mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+               schedule_mask |= mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
 
                /* Start a soft timer to make sure the soft job is released within a limited time */
                mali_spinlock_reentrant_wait(system->spinlock, tid);
@@ -725,7 +775,7 @@ struct mali_timeline_system *mali_timeline_system_create(struct mali_session_dat
        }
 
 #if defined(CONFIG_SYNC)
-       system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+       system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
        if (NULL == system->signaled_sync_tl) {
                mali_timeline_system_destroy(system);
                return NULL;
@@ -841,7 +891,7 @@ void mali_timeline_system_abort(struct mali_timeline_system *system)
 #endif /* defined(CONFIG_SYNC) */
 
        /* Should not be any waiters or trackers left at this point. */
-       MALI_DEBUG_CODE( {
+       MALI_DEBUG_CODE({
                u32 i;
                mali_spinlock_reentrant_wait(system->spinlock, tid);
                for (i = 0; i < MALI_TIMELINE_MAX; ++i)
@@ -862,6 +912,9 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system)
 {
        u32 i;
        struct mali_timeline_waiter *waiter, *next;
+#if defined(CONFIG_SYNC)
+       u32 tid = _mali_osk_get_tid();
+#endif
 
        MALI_DEBUG_ASSERT_POINTER(system);
        MALI_DEBUG_ASSERT_POINTER(system->session);
@@ -869,6 +922,7 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system)
        MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
 
        if (NULL != system) {
+
                /* There should be no waiters left on this queue. */
                if (NULL != system->wait_queue) {
                        _mali_osk_wait_queue_term(system->wait_queue);
@@ -887,6 +941,14 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system)
                if (NULL != system->signaled_sync_tl) {
                        sync_timeline_destroy(system->signaled_sync_tl);
                }
+
+               for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+                       if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) {
+                               mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid);
+                               system->timelines[i]->destroyed = MALI_TRUE;
+                               mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid);
+                       }
+               }
 #endif /* defined(CONFIG_SYNC) */
 
                for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
@@ -894,6 +956,7 @@ void mali_timeline_system_destroy(struct mali_timeline_system *system)
                                mali_timeline_destroy(system->timelines[i]);
                        }
                }
+
                if (NULL != system->spinlock) {
                        mali_spinlock_reentrant_term(system->spinlock);
                }
@@ -946,9 +1009,9 @@ static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struc
 }
 
 static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
-        struct mali_timeline_waiter **tail,
-        struct mali_timeline_waiter **head,
-        int max_num_waiters)
+               struct mali_timeline_waiter **tail,
+               struct mali_timeline_waiter **head,
+               int max_num_waiters)
 {
        u32 i, tid = _mali_osk_get_tid();
        mali_bool do_alloc;
@@ -1000,9 +1063,9 @@ static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *s
  * @param waiter_head List of pre-allocated waiters.
  */
 static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
-        struct mali_timeline_tracker *tracker,
-        struct mali_timeline_waiter *waiter_tail,
-        struct mali_timeline_waiter *waiter_head)
+               struct mali_timeline_tracker *tracker,
+               struct mali_timeline_waiter *waiter_tail,
+               struct mali_timeline_waiter *waiter_head)
 {
        int i;
        u32 tid = _mali_osk_get_tid();
@@ -1040,7 +1103,7 @@ static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_
 
                if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
                        MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
-                                         point, timeline->point_oldest, timeline->point_next));
+                                         point, timeline->point_oldest, timeline->point_next));
                        continue;
                }
 
@@ -1103,6 +1166,7 @@ static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_
                ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
                if (1 == ret) {
                        /* Fence already signaled, no waiter needed. */
+                       tracker->fence.sync_fd = -1;
                        goto exit;
                } else if (0 != ret) {
                        MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
@@ -1161,12 +1225,12 @@ exit:
        }
 #endif /* defined(CONFIG_SYNC) */
 
-       mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+       mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
 }
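
Editor's note: two things happen around the exit path above. Scheduling moves from mali_scheduler_schedule_from_mask() to mali_executor_schedule_from_mask(), and when sync_fence_wait_async() reports the fence as already signaled, the tracker's sync_fd is reset to -1, presumably so later code no longer sees an outstanding sync dependency. The return-value contract relied on is the legacy Android sync one, int sync_fence_wait_async(struct sync_fence *, struct sync_fence_waiter *); restated as an annotated fragment (not runnable on its own):

    ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
    if (1 == ret) {
            /* Fence already signaled: no waiter queued, drop the dependency now. */
    } else if (0 == ret) {
            /* Waiter queued: completion arrives later via the waiter callback. */
    } else {
            /* Negative: error, treat the dependency as failed. */
    }
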
 
 mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
-        struct mali_timeline_tracker *tracker,
-        enum mali_timeline_id timeline_id)
+               struct mali_timeline_tracker *tracker,
+               enum mali_timeline_id timeline_id)
 {
        int num_waiters = 0;
        struct mali_timeline_waiter *waiter_tail, *waiter_head;
@@ -1224,7 +1288,7 @@ mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system
 }
 
 static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
-        struct mali_timeline_waiter *waiter)
+               struct mali_timeline_waiter *waiter)
 {
        struct mali_timeline_tracker *tracker;
        mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
@@ -1255,7 +1319,7 @@ static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timel
 }
 
 mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
-        enum mali_timeline_id timeline_id)
+               enum mali_timeline_id timeline_id)
 {
        mali_timeline_point point;
        struct mali_timeline *timeline;
@@ -1283,6 +1347,20 @@ mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_s
        return point;
 }
 
+void mali_timeline_initialize(void)
+{
+       _mali_osk_atomic_init(&gp_tracker_count, 0);
+       _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+       _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+}
+
+void mali_timeline_terminate(void)
+{
+       _mali_osk_atomic_term(&gp_tracker_count);
+       _mali_osk_atomic_term(&phy_pp_tracker_count);
+       _mali_osk_atomic_term(&virt_pp_tracker_count);
+}
+
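
Editor's note: the new gp/phy_pp/virt_pp tracker counters are plain atomics initialized and torn down with the module, so scheduler-side code can ask "is any work outstanding?" without taking the timeline lock; the header later in this diff adds the matching mali_timeline_has_gp_job(), mali_timeline_has_physical_pp_job(), and mali_timeline_has_virtual_pp_job() predicates. A stand-alone analogue with C11 atomics (illustrative names; the driver wraps this in _mali_osk_atomic_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint gp_trackers;

    static void gp_tracker_add(void)    { atomic_fetch_add(&gp_trackers, 1); }
    static void gp_tracker_remove(void) { atomic_fetch_sub(&gp_trackers, 1); }

    /* Cheap scheduler-side query: no lock, no list walk. */
    static bool has_gp_job(void)
    {
            return atomic_load(&gp_trackers) > 0;
    }
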
 #if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
 
 static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
@@ -1299,7 +1377,11 @@ static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, e
        system = timeline->system;
 
        if (MALI_TIMELINE_MAX > id) {
-               return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+               if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+                       return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+               } else {
+                       return MALI_FALSE;
+               }
        } else {
                MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
                return MALI_FALSE;
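
Editor's note: the added guard matters because MALI_TIMELINE_NO_POINT is the "no dependency on this timeline" sentinel; without it, the debug helper would ask whether the sentinel itself lies on the timeline. The underlying liveness test is the usual wrap-safe interval check (the header below describes it as translating the timeline so the oldest point sits at the origin). A self-contained sketch of that check:

    typedef unsigned int u32;

    /* A point is live iff oldest <= point < next in modulo-2^32 arithmetic. */
    static int point_is_on(u32 oldest, u32 next, u32 point)
    {
            return (u32)(point - oldest) < (u32)(next - oldest);
    }
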
@@ -1310,9 +1392,9 @@ static const char *timeline_id_to_string(enum mali_timeline_id id)
 {
        switch (id) {
        case MALI_TIMELINE_GP:
-               return "  GP";
+               return "GP";
        case MALI_TIMELINE_PP:
-               return "  PP";
+               return "PP";
        case MALI_TIMELINE_SOFT:
                return "SOFT";
        default:
@@ -1324,9 +1406,9 @@ static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_ty
 {
        switch (type) {
        case MALI_TIMELINE_TRACKER_GP:
-               return "  GP";
+               return "GP";
        case MALI_TIMELINE_TRACKER_PP:
-               return "  PP";
+               return "PP";
        case MALI_TIMELINE_TRACKER_SOFT:
                return "SOFT";
        case MALI_TIMELINE_TRACKER_WAIT:
@@ -1360,54 +1442,125 @@ mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_ti
        return MALI_TIMELINE_TS_FINISH;
 }
 
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker)
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
 {
        const char *tracker_state = "IWAF";
+       char state_char = 'I';
+       char tracker_type[32] = {0};
 
        MALI_DEBUG_ASSERT_POINTER(tracker);
 
+       state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+       _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC)
+       if (0 != tracker->trigger_ref_count) {
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                                   tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                   tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+       } else {
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                                   tracker_type, tracker->point, state_char,
+                                   tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+       }
+#else
        if (0 != tracker->trigger_ref_count) {
-               MALI_PRINTF(("TL:  %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u,%d]  (0x%08X)\n",
-                            timeline_tracker_type_to_string(tracker->type), tracker->point,
-                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
-                            tracker->trigger_ref_count,
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
-                            is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
-                            tracker->fence.sync_fd, tracker->job));
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)]  job:(0x%08X)\n",
+                                   tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                                   is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                                   tracker->job);
        } else {
-               MALI_PRINTF(("TL:  %s %u %c  (0x%08X)\n",
-                            timeline_tracker_type_to_string(tracker->type), tracker->point,
-                            *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)),
-                            tracker->job));
+               _mali_osk_ctxprintf(print_ctx, "TL:  %s %u %c  job:(0x%08X)\n",
+                                   tracker_type, tracker->point, state_char,
+                                   tracker->job);
        }
+#endif
 }
 
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline)
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
 {
        struct mali_timeline_tracker *tracker = NULL;
-       int i_max = 30;
 
        MALI_DEBUG_ASSERT_POINTER(timeline);
 
        tracker = timeline->tracker_tail;
-       while (NULL != tracker && 0 < --i_max) {
-               mali_timeline_debug_print_tracker(tracker);
+       while (NULL != tracker) {
+               mali_timeline_debug_print_tracker(tracker, print_ctx);
                tracker = tracker->timeline_next;
        }
+}
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker)
+{
+       const char *tracker_state = "IWAF";
+       char state_char = 'I';
+       char tracker_type[32] = {0};
+
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+
+       state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+       _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC)
+       if (0 != tracker->trigger_ref_count) {
+               MALI_PRINT(("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)]  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                           tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+       } else {
+               MALI_PRINT(("TL:  %s %u %c  fd:%d  fence:(0x%08X)  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char,
+                           tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+       }
+#else
+       if (0 != tracker->trigger_ref_count) {
+               MALI_PRINT(("TL:  %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)]  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+                           is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+                           tracker->job));
+       } else {
+               MALI_PRINT(("TL:  %s %u %c  job:(0x%08X)\n",
+                           tracker_type, tracker->point, state_char,
+                           tracker->job));
+       }
+#endif
+}
+
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline)
+{
+       struct mali_timeline_tracker *tracker = NULL;
 
-       if (0 == i_max) {
-               MALI_PRINTF(("TL: Too many trackers in list to print\n"));
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       tracker = timeline->tracker_tail;
+       while (NULL != tracker) {
+               mali_timeline_debug_direct_print_tracker(tracker);
+               tracker = tracker->timeline_next;
        }
 }
 
-void mali_timeline_debug_print_system(struct mali_timeline_system *system)
+#endif
+
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
 {
        int i;
        int num_printed = 0;
+       u32 tid = _mali_osk_get_tid();
 
        MALI_DEBUG_ASSERT_POINTER(system);
 
+       mali_spinlock_reentrant_wait(system->spinlock, tid);
+
        /* Print all timelines */
        for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
                struct mali_timeline *timeline = system->timelines[i];
@@ -1416,15 +1569,18 @@ void mali_timeline_debug_print_system(struct mali_timeline_system *system)
 
                if (NULL == timeline->tracker_head) continue;
 
-               MALI_PRINTF(("TL: Timeline %s:\n",
-                            timeline_id_to_string((enum mali_timeline_id)i)));
-               mali_timeline_debug_print_timeline(timeline);
+               _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+                                   timeline_id_to_string((enum mali_timeline_id)i));
+
+               mali_timeline_debug_print_timeline(timeline, print_ctx);
                num_printed++;
        }
 
        if (0 == num_printed) {
-               MALI_PRINTF(("TL: All timelines empty\n"));
+               _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
        }
+
+       mali_spinlock_reentrant_signal(system->spinlock, tid);
 }
 
 #endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
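
Editor's note: the refactor running through these debug printers swaps hard-wired MALI_PRINTF calls for _mali_osk_ctxprintf() on a caller-supplied _mali_osk_print_ctx, so one walk over the trackers can feed debugfs, a log buffer, or the console; the 30-entry print cap is dropped, the system-level dump now holds the system spinlock while walking, and the *_direct_print_* variants keep a raw-printing path compiled only on 3.17 and newer kernels, apparently for call sites where no print context is available. A minimal user-space analogue of the print-context idea (hypothetical names):

    #include <stdarg.h>
    #include <stdio.h>

    struct print_ctx {
            FILE *sink;     /* could equally be a buffer or a seq_file-like object */
    };

    static void ctxprintf(struct print_ctx *ctx, const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vfprintf(ctx->sink, fmt, ap);   /* same walk, any sink */
            va_end(ap);
    }
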
index a5f167853357ea6bd52cffad00591a202fcb15ff..7d6cc8b39bd4bacf91f432a2768657b563e27134 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -18,6 +18,7 @@
 #include "mali_spinlock_reentrant.h"
 #include "mali_sync.h"
 #include "mali_scheduler_types.h"
+#include <linux/version.h>
 
 /**
  * Soft job timeout.
@@ -25,7 +26,7 @@
  * Soft jobs have to be signaled as complete after activation.  Normally this is done by user space,
  * but in order to guarantee that every soft job is completed, we also have a timer.
  */
-#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
 
 /**
  * Timeline type.
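
Editor's note: kernel timers express expiry in jiffies as unsigned long (struct timer_list's expires field), so widening MALI_TIMELINE_TIMEOUT_HZ, and the tracker's os_tick fields below, from u32 to unsigned long keeps the types consistent on 64-bit builds. Since HZ is ticks per second, the constant is 1.5 s regardless of the configured tick rate:

    /* HZ jiffies == 1 s, so HZ * 3 / 2 == 1.5 s
     * (e.g. HZ=100 -> 150 jiffies, HZ=250 -> 375 jiffies). */
    unsigned long soft_job_timeout = (unsigned long)(HZ * 3 / 2);
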
@@ -140,6 +141,8 @@ struct mali_timeline {
 
 #if defined(CONFIG_SYNC)
        struct sync_timeline         *sync_tl;      /**< Sync timeline that corresponds to this timeline. */
+       mali_bool destroyed;                            /**< MALI_TRUE once teardown of this timeline has begun. */
+       struct mali_spinlock_reentrant *spinlock;       /**< Spin lock protecting this timeline's destroyed flag */
 #endif /* defined(CONFIG_SYNC) */
 
        /* The following fields are used to time out soft job trackers. */
@@ -194,11 +197,15 @@ struct mali_timeline_tracker {
        void                          *job;          /**< Owner of tracker. */
 
        /* The following fields are used to time out soft job trackers. */
-       u32                           os_tick_create;
-       u32                           os_tick_activate;
+       unsigned long                 os_tick_create;
+       unsigned long                 os_tick_activate;
        mali_bool                     timer_active;
 };
 
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
 /**
  * What follows is a set of functions to check the state of a timeline and to determine where on a
  * timeline a given point is.  Most of these checks will translate the timeline so the oldest point
@@ -368,8 +375,8 @@ void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
  * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
  */
 mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
-        struct mali_timeline_tracker *tracker,
-        enum mali_timeline_id timeline_id);
+               struct mali_timeline_tracker *tracker,
+               enum mali_timeline_id timeline_id);
 
 /**
  * Get latest point on timeline.
@@ -379,7 +386,7 @@ mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system
  * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
  */
 mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
-        enum mali_timeline_id timeline_id);
+               enum mali_timeline_id timeline_id);
 
 /**
  * Initialize tracker.
@@ -392,9 +399,9 @@ mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_s
  * @param job Pointer to job struct this tracker is associated with.
  */
 void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
-                                mali_timeline_tracker_type type,
-                                struct mali_timeline_fence *fence,
-                                void *job);
+                               mali_timeline_tracker_type type,
+                               struct mali_timeline_fence *fence,
+                               void *job);
 
 /**
  * Grab trigger ref count on tracker.
@@ -439,6 +446,14 @@ mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system
  */
 mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
 
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+       struct mali_timeline_tracker *tracker)
+{
+       MALI_DEBUG_ASSERT_POINTER(tracker);
+       return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+               tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
 /**
  * Copy data from a UK fence to a Timeline fence.
  *
@@ -447,7 +462,28 @@ mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *
  */
 void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
 
+void mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+       return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
+
+#if defined(DEBUG)
 #define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
 #if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
 
 /**
@@ -473,21 +509,26 @@ mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_ti
  *
  * @param tracker Tracker to print.
  */
-void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
 
 /**
  * Print debug information about timeline.
  *
  * @param timeline Timeline to print.
  */
-void mali_timeline_debug_print_timeline(struct mali_timeline *timeline);
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline);
+#endif
 
 /**
  * Print debug information about timeline system.
  *
  * @param system Timeline system to print.
  */
-void mali_timeline_debug_print_system(struct mali_timeline_system *system);
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
 
 #endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
 
index ab5cb00a40faf44e1cae0bc2fb4464e3745d1876..895a583f9b3e42c8d5a6dbc146618aa541aea3a3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -101,7 +101,11 @@ static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_syst
        if (-1 != fence->sync_fd) {
                sync_fence = sync_fence_fdget(fence->sync_fd);
                if (likely(NULL != sync_fence)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
                        if (0 == sync_fence->status) {
+#else
+                       if (0 == atomic_read(&sync_fence->status)) {
+#endif
                                ret = MALI_FALSE;
                        }
                } else {
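
Editor's note: the version guard reflects the 3.17 rework of the Android sync framework, where sync_fence was rebuilt on top of struct fence and its status field became an atomic_t. If the same split recurs at more call sites, a compat accessor could collapse it in one place; a hypothetical helper, not part of this driver:

    #include <linux/version.h>

    /* Hypothetical compat wrapper hiding the 3.17 status-type change. */
    static inline int mali_sync_fence_status(struct sync_fence *fence)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
            return fence->status;
    #else
            return atomic_read(&fence->status);
    #endif
    }
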
index 16d2f2c0cd47df853b522eeb193a7121b1af01cb..ed7d4167838ac5497125310b952a4f3a8c819ef8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index bad18d35b8a393c8e442465362ed7106505c8d39..1ced601816e5201c483331c4248f1c6773e7bf62 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -89,19 +89,12 @@ s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct
                MALI_DEBUG_ASSERT_POINTER(timeline);
 
                sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
-               if (NULL == sync_fence) 
-      {
-         MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create: mali_timeline_sync_fence_create_and_add_tracker!\n"));
-                  goto error;
-      }
+               if (NULL == sync_fence) goto error;
+
                if (NULL != sync_fence_acc) {
                        /* Merge sync fences. */
                        sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
-                       if (NULL == sync_fence_acc) 
-                       {
-                          MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create: mali_sync_fence_merge! sync_fence_acc=%x, sync_fence=%x\n", sync_fence_acc, sync_fence));   
-                          goto error;
-                       }
+                       if (NULL == sync_fence_acc) goto error;
                } else {
                        /* This was the first sync fence created. */
                        sync_fence_acc = sync_fence;
@@ -112,18 +105,11 @@ s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct
                struct sync_fence *sync_fence;
 
                sync_fence = sync_fence_fdget(fence->sync_fd);
-               if (NULL == sync_fence) 
-      {
-         MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create: sync_fence_fdget! fence->sync_fd=%x, sync_fence=%x\n", fence->sync_fd, sync_fence));
-                  goto error;
-      }
+               if (NULL == sync_fence) goto error;
+
                if (NULL != sync_fence_acc) {
                        sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
-                       if (NULL == sync_fence_acc)
-                        {   
-            MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create: mali_sync_fence_merge! sync_fence_acc=%x\n", sync_fence_acc));
-                          goto error;
-                        }
+                       if (NULL == sync_fence_acc) goto error;
                } else {
                        sync_fence_acc = sync_fence;
                }
@@ -135,11 +121,7 @@ s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct
                /* There was nothing to wait on, so return an already signaled fence. */
 
                sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
-               if (NULL == sync_fence_acc) 
-               {
-                  MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create: mali_sync_timeline_create_signaled_fence! sync_fence_acc=%x\n", sync_fence_acc));   
-                  goto error;
-               }
+               if (NULL == sync_fence_acc) goto error;
        }
 
        /* Return file descriptor for the accumulated sync fence. */
@@ -149,7 +131,7 @@ error:
        if (NULL != sync_fence_acc) {
                sync_fence_put(sync_fence_acc);
        }
-   MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create fail!return -1, sync_fence_acc=%x\n", sync_fence_acc));
+
        return -1;
 }
 
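Editor's note: stripped of its vendor debug prints, the creation path reads as a plain merge-accumulate: one sync fence per timeline dependency, plus an optional caller-supplied fd, is folded into a single fence via mali_sync_fence_merge(), and if nothing needed waiting on, an already-signaled fence is minted from the "mali-always-signaled" timeline created earlier in this diff; any failure puts the accumulator and returns -1. The shape of the loop, with hypothetical merge() that consumes both inputs and returns NULL on failure:

    struct fence;                                            /* opaque handle */
    struct fence *merge(struct fence *a, struct fence *b);   /* NULL on failure */

    static struct fence *merge_all(struct fence **deps, int n)
    {
            struct fence *acc = NULL;
            int i;

            for (i = 0; i < n; ++i) {
                    if (NULL == acc) {
                            acc = deps[i];        /* first dependency seeds the accumulator */
                            continue;
                    }
                    acc = merge(acc, deps[i]);    /* consumes both inputs */
                    if (NULL == acc)
                            return NULL;          /* caller maps this to -1 */
            }
            return acc;
    }
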
index 3d8e3a1cad8349129aeeff143240bb38afa41fa1..d9554848dffbacc69f3bdda57f42cc8fe402e4e0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 5bc8e1b0491e1c8c899709b07092e8ecd3135064..28cd81b1a00be506b01e222fb9f1d0f4d3641042 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -192,7 +192,7 @@ extern "C" {
  * @param context pointer to storage to return a (void*)context handle.
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_open( void **context );
+_mali_osk_errcode_t _mali_ukk_open(void **context);
 
 /** @brief End a Mali Device Driver session
  *
@@ -203,7 +203,7 @@ _mali_osk_errcode_t _mali_ukk_open( void **context );
  * @param context pointer to a stored (void*)context handle.
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_close( void **context );
+_mali_osk_errcode_t _mali_ukk_close(void **context);
 
 /** @} */ /* end group _mali_uk_context */
 
@@ -224,21 +224,31 @@ _mali_osk_errcode_t _mali_ukk_close( void **context );
  * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
 
 /** @brief Post a notification to the notification queue of this application.
  *
  * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
 
 /** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
  *
  * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
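
Editor's note: keeping the old _mali_ukk_get_api_version() alongside a v2 gives obsolete user-space binaries a call that still succeeds and reports incompatibility, rather than an unrecognized ioctl. A plausible shape of the handshake (field names assumed, not taken from this diff):

    /* Hypothetical: compare the version user space passed in with ours. */
    if (MALI_UK_API_VERSION != args->version) {
            args->compatible = 0;    /* tell the client why before it proceeds */
            return _MALI_OSK_ERR_OK; /* the query itself still succeeds */
    }
    args->compatible = 1;
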
 
 /** @brief Get the user space settings applicable for calling process.
  *
@@ -311,7 +321,7 @@ _mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priori
  * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
 
 /** @brief Unmap Mali Memory from the current user process
  *
@@ -321,92 +331,24 @@ _mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
  * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
 
 /** @brief Determine the buffer size necessary for an MMU page table dump.
  * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
 /** @brief Dump MMU Page tables.
  * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
 
 /** @brief Write user data to specified Mali memory without causing segfaults.
  * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_mem_write_safe( _mali_uk_mem_write_safe_s *args );
-
-/** @brief Map a physically contiguous range of memory into Mali
- * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
-
-/** @brief Unmap a physically contiguous range of memory from Mali
- * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
-
-#if defined(CONFIG_MALI400_UMP)
-/** @brief Map UMP memory into Mali
- * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
-/** @brief Unmap UMP memory from Mali
- * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
-#endif /* CONFIG_MALI400_UMP */
-
-/** @brief Determine virtual-to-physical mapping of a contiguous memory range
- * (optional)
- *
- * This allows the user-side to do a virtual-to-physical address translation.
- * In conjunction with _mali_uku_map_external_mem, this can be used to do
- * direct rendering.
- *
- * This function will only succeed on a virtual range that is mapped into the
- * current process, and that is contigious.
- *
- * If va is not page-aligned, then it is rounded down to the next page
- * boundary. The remainer is added to size, such that ((u32)va)+size before
- * rounding is equal to ((u32)va)+size after rounding. The rounded modified
- * va and size will be written out into args on success.
- *
- * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
- * then size will be rounded up to the next multiple of PAGE_SIZE before
- * translation occurs. The rounded up size will be written out into args on
- * success.
- *
- * On most OSs, virtual-to-physical address translation is a priveledged
- * function. Therefore, the implementer must validate the range supplied, to
- * ensure they are not providing arbitrary virtual-to-physical address
- * translations. While it is unlikely such a mechanism could be used to
- * compromise the security of a system on its own, it is possible it could be
- * combined with another small security risk to cause a much larger security
- * risk.
- *
- * @note This is an optional part of the interface, and is only used by certain
- * implementations of libEGL. If the platform layer in your libEGL
- * implementation does not require Virtual-to-Physical address translation,
- * then this function need not be implemented. A stub implementation should not
- * be required either, as it would only be removed by the compiler's dead code
- * elimination.
- *
- * @note if implemented, this function is entirely platform-dependant, and does
- * not exist in common code.
- *
- * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
- * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
- */
-_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
 
 /** @} */ /* end group _mali_uk_memory */
 
@@ -436,7 +378,7 @@ _mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
  * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_pp_start_job( void *ctx, _mali_uk_pp_start_job_s *uargs );
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
 
 /**
  * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
@@ -447,14 +389,14 @@ _mali_osk_errcode_t _mali_ukk_pp_start_job( void *ctx, _mali_uk_pp_start_job_s *
  * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job( void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs );
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
 
 /** @brief Returns the number of Fragment Processors in the system
  *
  * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
 
 /** @brief Returns the version that all Fragment Processor cores are compatible with.
  *
@@ -464,7 +406,7 @@ _mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_
  * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
 
 /** @brief Disable Write-back unit(s) on specified job
  *
@@ -501,14 +443,14 @@ void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
  * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_gp_start_job( void *ctx, _mali_uk_gp_start_job_s *uargs );
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
 
 /** @brief Returns the number of Vertex Processors in the system.
  *
  * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
 
 /** @brief Returns the version that all Vertex Processor cores are compatible with.
  *
@@ -518,7 +460,7 @@ _mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_
  * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
 
 /** @brief Resume or abort suspended Vertex Processor jobs.
  *
@@ -528,7 +470,7 @@ _mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_
  * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
  * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
  */
-_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
 
 /** @} */ /* end group _mali_uk_gp */
 
@@ -536,35 +478,23 @@ _mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_
 /** @addtogroup _mali_uk_profiling U/K Timeline profiling module
  * @{ */
 
-/** @brief Start recording profiling events.
- *
- * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
-
 /** @brief Add event to profiling buffer.
  *
  * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
  */
 _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
 
-/** @brief Stop recording profiling events.
+/** @brief Get profiling stream fd.
  *
- * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h"
  */
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args);
 
-/** @brief Retrieve a recorded profiling event.
+/** @brief Set a profiling control value.
  *
- * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h"
  */
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
-
-/** @brief Clear recorded profiling events.
- *
- * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
- */
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args);
 
 /** @} */ /* end group _mali_uk_profiling */
 #endif
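
Editor's note: the profiling interface changes model here. The old start/stop/get_event/clear quartet pulled events one call at a time; the replacement streams them, with _mali_ukk_profiling_control_set() configuring capture and _mali_ukk_profiling_stream_fd_get() handing user space a file descriptor it can poll() and read(). A hypothetical user-space consumer (names and record format assumed):

    #include <poll.h>
    #include <unistd.h>

    static void drain_profiling_stream(int stream_fd)
    {
            char buf[4096];
            struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

            while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                    ssize_t n = read(stream_fd, buf, sizeof(buf));

                    if (n <= 0)
                            break;
                    /* parse n bytes of event records here */
            }
    }
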
@@ -601,6 +531,8 @@ _mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *
 
 u32 _mali_ukk_report_memory_usage(void);
 
+u32 _mali_ukk_report_total_memory_size(void);
+
 u32 _mali_ukk_utilization_gp_pp(void);
 
 u32 _mali_ukk_utilization_gp(void);
index c28a8d6317cfb9305d3f0d89209220309e083230..6bb43100fc505ef4e4357c4689bdb5800b4ed03e 100644 (file)
@@ -1,15 +1,16 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
 
-#include "mali_osk.h"
 #include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
 #include "mali_uk_types.h"
 #include "mali_user_settings_db.h"
 #include "mali_session.h"
@@ -48,7 +49,7 @@ static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value
 
                for (i = 0; i < num_sessions_alloc; i++) {
                        notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
-                                    sizeof(_mali_uk_settings_changed_s));
+                                       sizeof(_mali_uk_settings_changed_s));
                        if (NULL != notobjs[i]) {
                                _mali_uk_settings_changed_s *data;
                                data = notobjs[i]->result_buffer;
index 547ffeeaf0659b7a4829639ba90bbf5d31b74e46..fbeb48686106a9686fb51a56fff0c0f2aef4b724 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 4e457efca35b959909f7724bbcb178e64dc1082a..a770b631f24926f6a2be1e810218ae29ff4b0f33 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #define MALI_GPU_NAME_UTGARD "mali-utgard"
 
-/* Mali-200 */
 
-#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
-       MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
-       MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
-       MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+#define MALI_OFFSET_GP                    0x00000
+#define MALI_OFFSET_GP_MMU                0x03000
+
+#define MALI_OFFSET_PP0                   0x08000
+#define MALI_OFFSET_PP0_MMU               0x04000
+#define MALI_OFFSET_PP1                   0x0A000
+#define MALI_OFFSET_PP1_MMU               0x05000
+#define MALI_OFFSET_PP2                   0x0C000
+#define MALI_OFFSET_PP2_MMU               0x06000
+#define MALI_OFFSET_PP3                   0x0E000
+#define MALI_OFFSET_PP3_MMU               0x07000
+
+#define MALI_OFFSET_PP4                   0x28000
+#define MALI_OFFSET_PP4_MMU               0x1C000
+#define MALI_OFFSET_PP5                   0x2A000
+#define MALI_OFFSET_PP5_MMU               0x1D000
+#define MALI_OFFSET_PP6                   0x2C000
+#define MALI_OFFSET_PP6_MMU               0x1E000
+#define MALI_OFFSET_PP7                   0x2E000
+#define MALI_OFFSET_PP7_MMU               0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0          0x01000
+#define MALI_OFFSET_L2_RESOURCE1          0x10000
+#define MALI_OFFSET_L2_RESOURCE2          0x11000
+
+#define MALI400_OFFSET_L2_CACHE0          MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0          MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1          MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2          MALI_OFFSET_L2_RESOURCE2
+#define MALI470_OFFSET_L2_CACHE1          MALI_OFFSET_L2_RESOURCE0
+
+#define MALI_OFFSET_BCAST                 0x13000
+#define MALI_OFFSET_DLBU                  0x14000
+
+#define MALI_OFFSET_PP_BCAST              0x16000
+#define MALI_OFFSET_PP_BCAST_MMU          0x15000
+
+#define MALI_OFFSET_PMU                   0x02000
+#define MALI_OFFSET_DMA                   0x12000
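
Editor's note: with the register map named in one block, the resource macros below stop repeating raw hex and any mismatch between two expansions becomes grep-able. Worked expansion for a hypothetical SoC with the GPU block at physical base 0x13000000, using MALI_GPU_RESOURCES_MALI400_MP1_PMU:

    /* base_addr = 0x13000000 (hypothetical)                          */
    /* GP      -> base + MALI_OFFSET_GP           = 0x13000000        */
    /* GP MMU  -> base + MALI_OFFSET_GP_MMU       = 0x13003000        */
    /* L2      -> base + MALI400_OFFSET_L2_CACHE0 = 0x13001000        */
    /* PP0     -> base + MALI_OFFSET_PP0          = 0x13008000        */
    /* PP0 MMU -> base + MALI_OFFSET_PP0_MMU      = 0x13004000        */
    /* PMU     -> base + MALI_OFFSET_PMU          = 0x13002000        */
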
 
 /* Mali-300 */
 
 /* Mali-400 */
 
 #define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
 
 #define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
        MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
 
 #define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
 
 #define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
        MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
 
 #define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
 
 #define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
        MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
 
 #define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
 
 #define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
        MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
 
-/* Mali-450 */
+       /* Mali-450 */
 #define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
-       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
-       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
-       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
 
 #define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
        MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
 #define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
-       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
-       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
-       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
 
 #define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
        MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
 #define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
-       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
-       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
-       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
-       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
 
 #define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
        MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
 #define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
-       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
-       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
-       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
-       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
 
 #define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
        MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
 #define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
-       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
-       MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
-       MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
-       MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
-       MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
-       MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
-       MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+       MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
 
 #define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
        MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
-       MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+/* Mali-470 */
+#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+       MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+       MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+       MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+       MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+       MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+       MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
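/*
 * Editorial sketch, not part of the patch: a platform port consumes one of
 * the MP macros above by expanding it inside a static struct resource table
 * (types from <linux/ioport.h>). The base address and IRQ numbers here are
 * invented for illustration.
 */
static struct resource mali_gpu_resources[] = {
	/* base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq */
	MALI_GPU_RESOURCES_MALI470_MP1_PMU(0x13000000, 200, 201, 202, 203, 204)
};
/* Each macro expands to comma-terminated struct resource initializers, so no comma follows the expansion. */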
 #define MALI_GPU_RESOURCE_L2(addr) \
        { \
                .name = "Mali_L2", \
-               .flags = IORESOURCE_MEM, \
-               .start = addr, \
-               .end   = addr + 0x200, \
+               .flags = IORESOURCE_MEM, \
+               .start = addr, \
+               .end   = addr + 0x200, \
        },
 
 #define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
        { \
                .name = "Mali_GP", \
-               .flags = IORESOURCE_MEM, \
-               .start = gp_addr, \
-               .end =   gp_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end   = gp_addr + 0x100, \
        }, \
        { \
                .name = "Mali_GP_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = gp_irq, \
-               .end   = gp_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
        }, \
+
 #define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
        { \
                .name = "Mali_GP", \
-               .flags = IORESOURCE_MEM, \
-               .start = gp_addr, \
-               .end =   gp_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_addr, \
+               .end   = gp_addr + 0x100, \
        }, \
        { \
                .name = "Mali_GP_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = gp_irq, \
-               .end   = gp_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_irq, \
+               .end   = gp_irq, \
        }, \
        { \
                .name = "Mali_GP_MMU", \
-               .flags = IORESOURCE_MEM, \
-               .start = gp_mmu_addr, \
-               .end =   gp_mmu_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = gp_mmu_addr, \
+               .end   = gp_mmu_addr + 0x100, \
        }, \
        { \
                .name = "Mali_GP_MMU_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = gp_mmu_irq, \
-               .end =   gp_mmu_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = gp_mmu_irq, \
+               .end   = gp_mmu_irq, \
        },
 
 #define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
        { \
                .name = "Mali_PP", \
-               .flags = IORESOURCE_MEM, \
-               .start = pp_addr, \
-               .end =   pp_addr + 0x1100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
        }, \
        { \
                .name = "Mali_PP_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = pp_irq, \
-               .end =   pp_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
        }, \
+
 #define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
        { \
                .name = "Mali_PP" #id, \
-               .flags = IORESOURCE_MEM, \
-               .start = pp_addr, \
-               .end =   pp_addr + 0x1100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
        }, \
        { \
                .name = "Mali_PP" #id "_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = pp_irq, \
-               .end =   pp_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
        }, \
        { \
                .name = "Mali_PP" #id "_MMU", \
-               .flags = IORESOURCE_MEM, \
-               .start = pp_mmu_addr, \
-               .end =   pp_mmu_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_addr, \
+               .end   = pp_mmu_addr + 0x100, \
        }, \
        { \
                .name = "Mali_PP" #id "_MMU_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = pp_mmu_irq, \
-               .end =   pp_mmu_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_mmu_irq, \
+               .end   = pp_mmu_irq, \
        },
 
 #define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
        { \
                .name = "Mali_MMU", \
-               .flags = IORESOURCE_MEM, \
-               .start = mmu_addr, \
-               .end =   mmu_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = mmu_addr, \
+               .end   = mmu_addr + 0x100, \
        }, \
        { \
                .name = "Mali_MMU_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = mmu_irq, \
-               .end =   mmu_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = mmu_irq, \
+               .end   = mmu_irq, \
        },
 
 #define MALI_GPU_RESOURCE_PMU(pmu_addr) \
        { \
                .name = "Mali_PMU", \
-               .flags = IORESOURCE_MEM, \
-               .start = pmu_addr, \
-               .end =   pmu_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pmu_addr, \
+               .end   = pmu_addr + 0x100, \
        },
 
 #define MALI_GPU_RESOURCE_DMA(dma_addr) \
        { \
                .name = "Mali_DMA", \
-               .flags = IORESOURCE_MEM, \
-               .start = dma_addr, \
-               .end = dma_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = dma_addr, \
+               .end   = dma_addr + 0x100, \
        },
 
 #define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
        { \
                .name = "Mali_DLBU", \
-               .flags = IORESOURCE_MEM, \
-               .start = dlbu_addr, \
-               .end = dlbu_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = dlbu_addr, \
+               .end   = dlbu_addr + 0x100, \
        },
 
 #define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
        { \
                .name = "Mali_Broadcast", \
-               .flags = IORESOURCE_MEM, \
-               .start = bcast_addr, \
-               .end = bcast_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = bcast_addr, \
+               .end   = bcast_addr + 0x100, \
        },
 
 #define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
        { \
                .name = "Mali_PP_Broadcast", \
-               .flags = IORESOURCE_MEM, \
-               .start = pp_addr, \
-               .end =   pp_addr + 0x1100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_addr, \
+               .end   = pp_addr + 0x1100, \
        }, \
        { \
                .name = "Mali_PP_Broadcast_IRQ", \
-               .flags = IORESOURCE_IRQ, \
-               .start = pp_irq, \
-               .end =   pp_irq, \
+               .flags = IORESOURCE_IRQ, \
+               .start = pp_irq, \
+               .end   = pp_irq, \
        }, \
+
 #define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
        { \
                .name = "Mali_PP_MMU_Broadcast", \
-               .flags = IORESOURCE_MEM, \
-               .start = pp_mmu_bcast_addr, \
-               .end = pp_mmu_bcast_addr + 0x100, \
+               .flags = IORESOURCE_MEM, \
+               .start = pp_mmu_bcast_addr, \
+               .end   = pp_mmu_bcast_addr + 0x100, \
        },
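/*
 * Editorial sketch, not part of the patch: the element macros above are what
 * the MP macros expand to; the resulting table is handed to the kernel as a
 * platform device. The device name is an assumption (chosen to match the
 * usual Utgard probe name), and mali_gpu_data refers to the device-data
 * sketch further down.
 */
static struct platform_device mali_gpu_device = {
	.name              = "mali-utgard",     /* assumed probe name */
	.id                = 0,
	.resource          = mali_gpu_resources,
	.num_resources     = ARRAY_SIZE(mali_gpu_resources),
	.dev.platform_data = &mali_gpu_data,
};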
 
-struct mali_gpu_utilization_data {
-       unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
-       unsigned int utilization_gp;  /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
-       unsigned int utilization_pp;  /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-       unsigned int number_of_window_jobs;
-       unsigned int number_of_window_jobs_under_pressure;
-#endif
-};
-
-struct mali_gpu_device_data {
-       /* Dedicated GPU memory range (physical). */
-       unsigned long dedicated_mem_start;
-       unsigned long dedicated_mem_size;
-
-       /* Shared GPU memory */
-       unsigned long shared_mem_size;
-
-       /* Frame buffer memory to be accessible by Mali GPU (physical) */
-       unsigned long fb_start;
-       unsigned long fb_size;
-
-       /* Max runtime [ms] for jobs */
-       int max_job_runtime;
-
-       /* Report GPU utilization in this interval (specified in ms) */
-       unsigned long utilization_interval;
-
-       /* Function that will receive periodic GPU utilization numbers */
-       void (*utilization_callback)(struct mali_gpu_utilization_data *data);
-
-       /*
-        * Mali PMU switch delay.
-        * Only needed if the power gates are connected to the PMU in a high fanout
-        * network. This value is the number of Mali clock cycles it takes to
-        * enable the power gates and turn on the power mesh.
-        * This value will have no effect if a daisy chain implementation is used.
+       struct mali_gpu_utilization_data {
+               unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+               unsigned int utilization_gp;  /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+               unsigned int utilization_pp;  /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+       };
+
+       struct mali_gpu_clk_item {
+               unsigned int clock; /* unit: MHz */
+               unsigned int vol;
+       };
+
+       struct mali_gpu_clock {
+               struct mali_gpu_clk_item *item;
+               unsigned int num_of_steps;
+       };
+
+       struct mali_gpu_device_data {
+               /* Shared GPU memory */
+               unsigned long shared_mem_size;
+
+               /*
+                * Mali PMU switch delay.
+                * Only needed if the power gates are connected to the PMU in a high fanout
+                * network. This value is the number of Mali clock cycles it takes to
+                * enable the power gates and turn on the power mesh.
+                * This value will have no effect if a daisy chain implementation is used.
+                */
+               u32 pmu_switch_delay;
+
+               /* Mali dynamic power domain configuration, in sequence from 0-11:
+                *  GP, PP0, PP1, PP2, PP3, PP4, PP5, PP6, PP7, L2$0, L2$1, L2$2
+                */
+               u16 pmu_domain_config[12];
+
+               /* Dedicated GPU memory range (physical). */
+               unsigned long dedicated_mem_start;
+               unsigned long dedicated_mem_size;
+
+               /* Frame buffer memory to be accessible by Mali GPU (physical) */
+               unsigned long fb_start;
+               unsigned long fb_size;
+
+               /* Max runtime [ms] for jobs */
+               int max_job_runtime;
+
+               /* Report GPU utilization and perform related control at this interval (specified in ms) */
+               unsigned long control_interval;
+
+               /* Function that will receive periodic GPU utilization numbers */
+               void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+               /* Function the platform provides for setting the GPU frequency step; needed when CONFIG_MALI_DVFS is enabled */
+               int (*set_freq)(int setting_clock_step);
+               /* Function by which the platform reports the clock steps the driver may set; needed when CONFIG_MALI_DVFS is enabled */
+               void (*get_clock_info)(struct mali_gpu_clock **data);
+               /* Function that returns the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+               int (*get_freq)(void);
+       };
+
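/*
 * Editorial sketch, not part of the patch: one way a platform might wire up
 * the new CONFIG_MALI_DVFS hooks. The clock steps, voltage values and
 * function bodies are invented for illustration.
 */
static struct mali_gpu_clk_item mali_clk_steps[] = {
	{ .clock = 250, .vol = 950000 },        /* hypothetical step 0: 250 MHz */
	{ .clock = 500, .vol = 1050000 },       /* hypothetical step 1: 500 MHz */
};

static struct mali_gpu_clock mali_clock_info = {
	.item         = mali_clk_steps,
	.num_of_steps = ARRAY_SIZE(mali_clk_steps),
};

static int mali_cur_step;

static int mali_platform_set_freq(int setting_clock_step)
{
	mali_cur_step = setting_clock_step;     /* a real port would program PLLs/regulators here */
	return 0;
}

static void mali_platform_get_clock_info(struct mali_gpu_clock **data)
{
	*data = &mali_clock_info;
}

static int mali_platform_get_freq(void)
{
	return mali_cur_step;
}

static struct mali_gpu_device_data mali_gpu_data = {
	.shared_mem_size  = 256 * 1024 * 1024,  /* hypothetical 256 MB */
	.control_interval = 20,                 /* ms */
	.max_job_runtime  = 2000,               /* ms */
	.set_freq         = mali_platform_set_freq,
	.get_clock_info   = mali_platform_get_clock_info,
	.get_freq         = mali_platform_get_freq,
};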
+       /**
+        * Pause the scheduling and power state changes of Mali device driver.
+        * mali_dev_resume() must always be called as soon as possible after this function
+        * in order to resume normal operation of the Mali driver.
         */
-       u32 pmu_switch_delay;
-
+       void mali_dev_pause(void);
 
-       /* Mali Dynamic power domain configuration in sequence from 0-11
-        *  GP  PP0 PP1  PP2  PP3  PP4  PP5  PP6  PP7, L2$0 L2$1 L2$2
+       /**
+        * Resume scheduling and allow power changes in Mali device driver.
+        * This must always be called after mali_dev_pause().
         */
-       u16 pmu_domain_config[12];
-
-       /* Fuction that platform callback for freq tunning, needed when POWER_PERFORMANCE_POLICY enabled*/
-       int (*set_freq_callback)(unsigned int mhz);
-};
-
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * called to power down all cores
- */
-int mali_pmu_powerdown(void);
-
-
-/** @brief MALI GPU power up using MALI in-built PMU
- *
- * called to power up all cores
- */
-int mali_pmu_powerup(void);
-
-/**
- * Pause the scheduling and power state changes of Mali device driver.
- * mali_dev_resume() must always be called as soon as possible after this function
- * in order to resume normal operation of the Mali driver.
- */
-void mali_dev_pause(void);
-
-/**
- * Resume scheduling and allow power changes in Mali device driver.
- * This must always be called after mali_dev_pause().
- */
-void mali_dev_resume(void);
-
-/** @brief Set the desired number of PP cores to use.
- *
- * The internal Mali PMU will be used, if present, to physically power off the PP cores.
- *
- * @param num_cores The number of desired cores
- * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
- */
-int mali_perf_set_num_pp_cores(unsigned int num_cores);
+       void mali_dev_resume(void);
+
+       /** @brief Set the desired number of PP cores to use.
+        *
+        * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+        *
+        * @param num_cores The number of desired cores
+        * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+        */
+       int mali_perf_set_num_pp_cores(unsigned int num_cores);
 
 #endif
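/*
 * Editorial sketch, not part of the patch: the three exported calls above are
 * meant to bracket external reconfiguration of the GPU. The thermal-throttle
 * scenario is invented; only the pause/reconfigure/resume pattern is implied
 * by the header.
 */
static void example_gpu_throttle(void)
{
	mali_dev_pause();                       /* stop scheduling and power-state changes */
	if (mali_perf_set_num_pp_cores(2))      /* 0 on success, -EINVAL on a bad core count */
		pr_warn("mali: failed to reduce PP core count\n");
	mali_dev_resume();                      /* must follow mali_dev_pause() promptly */
}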
index 7ea190ff87f2395464b8eb23dd12c99e964fad64..76b1e8f113c8996cbf4c6af010b0808a187ac4c2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 0838868364208f5daf7dced1f9cc4bf3f974d7e2..bc83c27acbd167678529063644e371642a36329b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -38,55 +38,48 @@ extern "C" {
 #define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
 #define MALI_IOC_VSYNC_BASE     (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
 
-#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
-#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
-#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
-#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s *)
-#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s *)
-#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s *)
-#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s *)
-#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s *)
-#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s *)
-#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION      _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2         _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING           _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS          _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY      _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT  _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT              _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START             _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL            _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
 
-#define MALI_IOC_MEM_MAP_EXT                _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
-#define MALI_IOC_MEM_UNMAP_EXT              _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
-#define MALI_IOC_MEM_ATTACH_DMA_BUF         _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_DMA_BUF, _mali_uk_attach_dma_buf_s *)
-#define MALI_IOC_MEM_RELEASE_DMA_BUF        _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_DMA_BUF, _mali_uk_release_dma_buf_s *)
-#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s *)
-#define MALI_IOC_MEM_ATTACH_UMP             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
-#define MALI_IOC_MEM_RELEASE_UMP            _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
-#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
-#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
-#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s *)
+#define MALI_IOC_MEM_ALLOC                  _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s)
+#define MALI_IOC_MEM_FREE                   _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s)
+#define MALI_IOC_MEM_BIND                   _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s)
+#define MALI_IOC_MEM_UNBIND                 _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s)
+#define MALI_IOC_MEM_COW                    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s)
+#define MALI_IOC_MEM_COW_MODIFY_RANGE       _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE       _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE    _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE             _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
 
-#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
-#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s *)
-#define MALI_IOC_PP_NUMBER_OF_CORES_GET            _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
-#define MALI_IOC_PP_CORE_VERSION_GET       _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
-#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s * )
+#define MALI_IOC_PP_START_JOB               _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB        _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET     _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET        _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB              _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
 
-#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
-#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
-#define MALI_IOC_GP2_CORE_VERSION_GET      _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
-#define MALI_IOC_GP2_SUSPEND_RESPONSE      _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_GP2_START_JOB              _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET    _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET       _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE       _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE, _mali_uk_gp_suspend_response_s)
 
-#define MALI_IOC_PROFILING_START            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
-#define MALI_IOC_PROFILING_ADD_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
-#define MALI_IOC_PROFILING_STOP             _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
-#define MALI_IOC_PROFILING_GET_EVENT        _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
-#define MALI_IOC_PROFILING_CLEAR            _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
-#define MALI_IOC_PROFILING_GET_CONFIG       _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_get_user_settings_s *)
-#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT           _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS  _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET    _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
+#define MALI_IOC_PROFILING_STREAM_FD_GET       _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s)
+#define MALI_IOC_PROILING_CONTROL_SET          _IOR (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s)
 
-#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
-
-/* Deprecated ioctls */
-#define MALI_IOC_MEM_GET_BIG_BLOCK          _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_FREE_BIG_BLOCK         _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, void *)
-#define MALI_IOC_MEM_INIT                   _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, void *)
-#define MALI_IOC_MEM_TERM                   _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, void *)
+#define MALI_IOC_VSYNC_EVENT_REPORT         _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
 
 #ifdef __cplusplus
 }
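/*
 * Editorial note, not part of the patch: dropping the "*" from the _IOWR()/
 * _IOW()/_IOR() type argument changes the size that _IOC_SIZE() decodes from
 * each command number from sizeof(pointer) to sizeof(struct), so 32-bit and
 * 64-bit user space now compute identical command numbers (the structs were
 * made ABI-stable separately, e.g. void *ctx becoming u64 ctx). Illustrative
 * compile-time check:
 */
static void mali_ioctl_size_check(void)
{
	/* Holds by construction of _IOWR(): the encoded size is the struct size. */
	BUILD_BUG_ON(_IOC_SIZE(MALI_IOC_GP2_START_JOB) !=
		     sizeof(_mali_uk_gp_start_job_s));
}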
index 0bc62d4279b5e291cd4677fdfa5d8083b830ff3d..5a7396af662ca397015b01a25420270264badce2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -78,7 +78,7 @@ typedef enum {
        MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
        MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
        MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH       = 62,
-       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS= 63,
+       MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63,
 } cinstr_profiling_event_reason_single_sw_t;
 
 /**
@@ -128,6 +128,9 @@ typedef enum {
        MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP       = 43,
        MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS   = 44,
        MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC            = 45, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT             = 46, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT         = 47, /* USED */
+       MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT           = 48, /* USED */
 } cinstr_profiling_event_reason_suspend_resume_sw_t;
 
 /**
@@ -165,10 +168,22 @@ typedef enum {
        MALI_PROFILING_EVENT_DATA_CORE_PP5             = 10,
        MALI_PROFILING_EVENT_DATA_CORE_PP6             = 11,
        MALI_PROFILING_EVENT_DATA_CORE_PP7             = 12,
+       MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU         = 22, /* GP0 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU         = 26, /* PP0 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU         = 27, /* PP1 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU         = 28, /* PP2 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU         = 29, /* PP3 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU         = 30, /* PP4 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU         = 31, /* PP5 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU         = 32, /* PP6 + 21 */
+       MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU         = 33, /* PP7 + 21 */
+
 } cinstr_profiling_event_data_core_t;
 
 #define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
 #define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
 
 
 #endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
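/*
 * Editorial note, not part of the patch: the "+ 21" comments record the fixed
 * distance between a core's activity slot and its MMU slot, so the MAKE macros
 * are plain arithmetic. For example, for PP core 2:
 *
 *   MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(2) == 26 + 2 == 28
 *                                                 == MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU
 */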
index 918a3f328366b5f3eab9c1f960be5505123c12f4..8ac67c7915e5a113e30f08f0557f31975b8da2ca 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -21,6 +21,39 @@ extern "C" {
 #define MAX_NUM_FP_CORES 8
 #define MAX_NUM_VP_CORES 1
 
+#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \
+       {                                 \
+               "Filmstrip_cnt0",         \
+               "Frequency",              \
+               "Voltage",                \
+               "vertex",                 \
+               "fragment",               \
+               "Total_alloc_pages",      \
+       };
+
+#define _MALI_MEM_COUTNER_DESCRIPTIONS \
+       {                                \
+               "untyped_memory",        \
+               "vertex_index_buffer",   \
+               "texture_buffer",        \
+               "varying_buffer",        \
+               "render_target",         \
+               "pbuffer_buffer",        \
+               "plbu_heap",             \
+               "pointer_array_buffer",  \
+               "slave_tilelist",        \
+               "untyped_gp_cmdlist",    \
+               "polygon_cmdlist",       \
+               "texture_descriptor",    \
+               "render_state_word",     \
+               "shader",                \
+               "stream_buffer",         \
+               "fragment_stack",        \
+               "uniform",               \
+               "untyped_frame_pool",    \
+               "untyped_surface",       \
+       };
+
 /** The list of events supported by the Mali DDK. */
 typedef enum {
        /* Vertex processor activity */
@@ -116,8 +149,41 @@ typedef enum {
        COUNTER_GLES_STRIP_LINES_COUNT,
        COUNTER_GLES_LOOP_LINES_COUNT,
 
+       /* Special counter */
+
        /* Framebuffer capture pseudo-counter */
        COUNTER_FILMSTRIP,
+       COUNTER_FREQUENCY,
+       COUNTER_VOLTAGE,
+       COUNTER_VP_ACTIVITY,
+       COUNTER_FP_ACTIVITY,
+       COUNTER_TOTAL_ALLOC_PAGES,
+
+       /* Memory usage counter */
+       COUNTER_MEM_UNTYPED,
+       COUNTER_MEM_VB_IB,
+       COUNTER_MEM_TEXTURE,
+       COUNTER_MEM_VARYING,
+       COUNTER_MEM_RT,
+       COUNTER_MEM_PBUFFER,
+       /* memory usage for GP commands */
+       COUNTER_MEM_PLBU_HEAP,
+       COUNTER_MEM_POINTER_ARRAY,
+       COUNTER_MEM_SLAVE_TILELIST,
+       COUNTER_MEM_UNTYPE_GP_CMDLIST,
+       /* memory usage for polygon list commands */
+       COUNTER_MEM_POLYGON_CMDLIST,
+       /* memory usage for PP commands */
+       COUNTER_MEM_TD,
+       COUNTER_MEM_RSW,
+       /* other memory usage */
+       COUNTER_MEM_SHADER,
+       COUNTER_MEM_STREAMS,
+       COUNTER_MEM_FRAGMENT_STACK,
+       COUNTER_MEM_UNIFORM,
+       /* Special memory usage, used for memory pool allocation */
+       COUNTER_MEM_UNTYPE_MEM_POOL,
+       COUNTER_MEM_UNTYPE_SURFACE,
 
        NUMBER_OF_EVENTS
 } _mali_osk_counter_id;
@@ -132,7 +198,34 @@ typedef enum {
 #define LAST_SW_COUNTER         COUNTER_GLES_LOOP_LINES_COUNT
 
 #define FIRST_SPECIAL_COUNTER   COUNTER_FILMSTRIP
-#define LAST_SPECIAL_COUNTER    COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER    COUNTER_TOTAL_ALLOC_PAGES
+
+#define FIRST_MEM_COUNTER                   COUNTER_MEM_UNTYPED
+#define LAST_MEM_COUNTER                    COUNTER_MEM_UNTYPE_SURFACE
+
+#define MALI_PROFILING_MEM_COUNTERS_NUM     (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1)
+#define MALI_PROFILING_SPECIAL_COUNTERS_NUM (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1)
+#define MALI_PROFILING_SW_COUNTERS_NUM      (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1)
+
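/*
 * Editorial note, not part of the patch: these are inclusive ranges over the
 * enum, so the counts can be checked by hand against the description tables
 * earlier in the file:
 *
 *   MALI_PROFILING_MEM_COUNTERS_NUM     == 19, matching the 19 strings in
 *                                          _MALI_MEM_COUTNER_DESCRIPTIONS;
 *   MALI_PROFILING_SPECIAL_COUNTERS_NUM ==  6, matching the  6 strings in
 *                                          _MALI_SPCIAL_COUNTER_DESCRIPTIONS.
 */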
+/**
+ * Define the stream header types for the profiling stream.
+ */
+#define STREAM_HEADER_FRAMEBUFFER   0x05 /* Stream packet header type for framebuffer dumping. */
+#define STREAM_HEADER_COUNTER_VALUE 0x09 /* Stream packet header type for hw/sw/memory counter sampling. */
+#define STREAM_HEADER_CORE_ACTIVITY 0x0a /* Stream packet header type for activity counter sampling. */
+#define STREAM_HEADER_SIZE          5
+
+/**
+ * Define the packet header types of the profiling control packets.
+ */
+#define PACKET_HEADER_ERROR               0x80 /* Response packet header type on error. */
+#define PACKET_HEADER_ACK                 0x81 /* Response packet header type on success. */
+#define PACKET_HEADER_COUNTERS_REQUEST    0x82 /* Control packet header type to request counter information from the DDK. */
+#define PACKET_HEADER_COUNTERS_ACK        0x83 /* Response packet header type to send out counter information. */
+#define PACKET_HEADER_COUNTERS_ENABLE     0x84 /* Control packet header type to enable counters. */
+#define PACKET_HEADER_START_CAPTURE_VALUE 0x85 /* Control packet header type to start capturing values. */
+
+#define PACKET_HEADER_SIZE                5
 
 /**
  * Structure to pass performance counter data of a Mali core
@@ -170,6 +263,19 @@ typedef struct _mali_profiling_mali_version {
        u32 num_of_vp_cores;
 } _mali_profiling_mali_version;
 
+/**
+ * Structure describing a Mali profiling counter.
+ */
+typedef struct mali_profiling_counter {
+       char counter_name[40];
+       u32 counter_id;
+       u32 counter_event;
+       u32 prev_counter_value;
+       u32 current_counter_value;
+       u32 key;
+       int enabled;
+} mali_profiling_counter;
+
 /*
  * List of possible actions to be controlled by Streamline.
  * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
@@ -179,6 +285,8 @@ typedef struct _mali_profiling_mali_version {
 #define FBDUMP_CONTROL_RATE (2)
 #define SW_COUNTER_ENABLE (3)
 #define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define MEM_COUNTER_ENABLE (5)
+#define ANNOTATE_PROFILING_ENABLE (6)
 
 void _mali_profiling_control(u32 action, u32 value);
 
index 817f5a7b2401e43443e421428f2ca3c3fcaac594..4d6517e37e87515f65ef90edd03eb34609db7570 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -56,7 +56,6 @@ typedef enum {
        _MALI_UK_PP_SUBSYSTEM,        /**< Fragment Processor Group of U/K calls */
        _MALI_UK_GP_SUBSYSTEM,        /**< Vertex Processor Group of U/K calls */
        _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
-       _MALI_UK_PMM_SUBSYSTEM,       /**< Power Management Module Group of U/K calls */
        _MALI_UK_VSYNC_SUBSYSTEM,     /**< VSYNC Group of U/K calls */
 } _mali_uk_subsystem_t;
 
@@ -85,22 +84,15 @@ typedef enum {
 
        /** Memory functions */
 
-       _MALI_UK_INIT_MEM                = 0,    /**< _mali_ukk_init_mem() */
-       _MALI_UK_TERM_MEM,                       /**< _mali_ukk_term_mem() */
-       _MALI_UK_GET_BIG_BLOCK,                  /**< _mali_ukk_get_big_block() */
-       _MALI_UK_FREE_BIG_BLOCK,                 /**< _mali_ukk_free_big_block() */
-       _MALI_UK_MAP_MEM,                        /**< _mali_ukk_mem_mmap() */
-       _MALI_UK_UNMAP_MEM,                      /**< _mali_ukk_mem_munmap() */
+       _MALI_UK_ALLOC_MEM                = 0,   /**< _mali_ukk_alloc_mem() */
+       _MALI_UK_FREE_MEM,                       /**< _mali_ukk_free_mem() */
+       _MALI_UK_BIND_MEM,                       /**< _mali_ukk_mem_bind() */
+       _MALI_UK_UNBIND_MEM,                     /**< _mali_ukk_mem_unbind() */
+       _MALI_UK_COW_MEM,                        /**< _mali_ukk_mem_cow() */
+       _MALI_UK_COW_MODIFY_RANGE,               /**< _mali_ukk_mem_cow_modify_range() */
        _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
        _MALI_UK_DUMP_MMU_PAGE_TABLE,            /**< _mali_ukk_mem_dump_mmu_page_table() */
-       _MALI_UK_ATTACH_DMA_BUF,                 /**< _mali_ukk_attach_dma_buf() */
-       _MALI_UK_RELEASE_DMA_BUF,                /**< _mali_ukk_release_dma_buf() */
        _MALI_UK_DMA_BUF_GET_SIZE,               /**< _mali_ukk_dma_buf_get_size() */
-       _MALI_UK_ATTACH_UMP_MEM,                 /**< _mali_ukk_attach_ump_mem() */
-       _MALI_UK_RELEASE_UMP_MEM,                /**< _mali_ukk_release_ump_mem() */
-       _MALI_UK_MAP_EXT_MEM,                    /**< _mali_uku_map_external_mem() */
-       _MALI_UK_UNMAP_EXT_MEM,                  /**< _mali_uku_unmap_external_mem() */
-       _MALI_UK_VA_TO_MALI_PA,                  /**< _mali_uku_va_to_mali_pa() */
        _MALI_UK_MEM_WRITE_SAFE,                 /**< _mali_uku_mem_write_safe() */
 
        /** Common functions for each core */
@@ -126,29 +118,16 @@ typedef enum {
 
        /** Profiling functions */
 
-       _MALI_UK_PROFILING_START         = 0, /**< __mali_uku_profiling_start() */
-       _MALI_UK_PROFILING_ADD_EVENT,         /**< __mali_uku_profiling_add_event() */
-       _MALI_UK_PROFILING_STOP,              /**< __mali_uku_profiling_stop() */
-       _MALI_UK_PROFILING_GET_EVENT,         /**< __mali_uku_profiling_get_event() */
-       _MALI_UK_PROFILING_CLEAR,             /**< __mali_uku_profiling_clear() */
-       _MALI_UK_PROFILING_GET_CONFIG,        /**< __mali_uku_profiling_get_config() */
+       _MALI_UK_PROFILING_ADD_EVENT     = 0, /**< __mali_uku_profiling_add_event() */
        _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+       _MALI_UK_PROFILING_MEMORY_USAGE_GET,  /**< __mali_uku_profiling_memory_usage_get() */
+       _MALI_UK_PROFILING_STREAM_FD_GET,     /**< __mali_uku_profiling_stream_fd_get() */
+       _MALI_UK_PROFILING_CONTROL_SET,       /**< __mali_uku_profiling_control_set() */
 
        /** VSYNC reporting functions */
        _MALI_UK_VSYNC_EVENT_REPORT      = 0, /**< _mali_ukk_vsync_event_report() */
-
 } _mali_uk_functions;
 
-/** @brief Get the size necessary for system info
- *
- * @see _mali_ukk_get_system_info_size()
- */
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 size;                       /**< [out] size of buffer necessary to hold system information data, in bytes */
-} _mali_uk_get_system_info_size_s;
-
-
 /** @defgroup _mali_uk_getsysteminfo U/K Get System Info
  * @{ */
 
@@ -162,95 +141,6 @@ typedef struct {
  */
 typedef u32 _mali_core_version;
 
-/**
- * Enum values for the different modes the driver can be put in.
- * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
- * Job completion is reported using the _mali_ukk_wait_for_notification call.
- * The driver blocks this io command until a job has completed or failed or a timeout occurs.
- *
- * The 'raw' mode is reserved for future expansion.
- */
-typedef enum _mali_driver_mode {
-       _MALI_DRIVER_MODE_RAW = 1,    /**< Reserved for future expansion */
-       _MALI_DRIVER_MODE_NORMAL = 2  /**< Normal mode of operation */
-} _mali_driver_mode;
-
-/** @brief List of possible cores
- *
- * add new entries to the end of this enum */
-typedef enum _mali_core_type {
-       _MALI_GP2 = 2,                /**< MaliGP2 Programmable Vertex Processor */
-       _MALI_200 = 5,                /**< Mali200 Programmable Fragment Processor */
-       _MALI_400_GP = 6,             /**< Mali400 Programmable Vertex Processor */
-       _MALI_400_PP = 7,             /**< Mali400 Programmable Fragment Processor */
-       /* insert new core here, do NOT alter the existing values */
-} _mali_core_type;
-
-
-/** @brief Capabilities of Memory Banks
- *
- * These may be used to restrict memory banks for certain uses. They may be
- * used when access is not possible (e.g. Bus does not support access to it)
- * or when access is possible but not desired (e.g. Access is slow).
- *
- * In the case of 'possible but not desired', there is no way of specifying
- * the flags as an optimization hint, so that the memory could be used as a
- * last resort.
- *
- * @see _mali_mem_info
- */
-typedef enum _mali_bus_usage {
-
-       _MALI_PP_READABLE   = (1<<0),  /** Readable by the Fragment Processor */
-       _MALI_PP_WRITEABLE  = (1<<1),  /** Writeable by the Fragment Processor */
-       _MALI_GP_READABLE   = (1<<2),  /** Readable by the Vertex Processor */
-       _MALI_GP_WRITEABLE  = (1<<3),  /** Writeable by the Vertex Processor */
-       _MALI_CPU_READABLE  = (1<<4),  /** Readable by the CPU */
-       _MALI_CPU_WRITEABLE = (1<<5),  /** Writeable by the CPU */
-       _MALI_GP_L2_ALLOC   = (1<<6),  /** GP allocate mali L2 cache lines*/
-       _MALI_MMU_READABLE  = _MALI_PP_READABLE | _MALI_GP_READABLE,   /** Readable by the MMU (including all cores behind it) */
-       _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /** Writeable by the MMU (including all cores behind it) */
-} _mali_bus_usage;
-
-typedef enum mali_memory_cache_settings {
-       MALI_CACHE_STANDARD                     = 0,
-       MALI_CACHE_GP_READ_ALLOCATE     = 1,
-} mali_memory_cache_settings ;
-
-
-/** @brief Information about the Mali Memory system
- *
- * Information is stored in a linked list, which is stored entirely in the
- * buffer pointed to by the system_info member of the
- * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
- *
- * Each element of the linked list describes a single Mali Memory bank.
- * Each allocation can only come from one bank, and will not cross multiple
- * banks.
- *
- * On Mali-MMU systems, there is only one bank, which describes the maximum
- * possible address range that could be allocated (which may be much less than
- * the available physical memory)
- *
- * The flags member describes the capabilities of the memory. It is an error
- * to attempt to build a job for a particular core (PP or GP) when the memory
- * regions used do not have the capabilities for supporting that core. This
- * would result in a job abort from the Device Driver.
- *
- * For example, it is correct to build a PP job where read-only data structures
- * are taken from a memory with _MALI_PP_READABLE set and
- * _MALI_PP_WRITEABLE clear, and a framebuffer with  _MALI_PP_WRITEABLE set and
- * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
- * where _MALI_PP_WRITEABLE is clear.
- */
-typedef struct _mali_mem_info {
-       u32 size;                     /**< Size of the memory bank in bytes */
-       _mali_bus_usage flags;        /**< Capabilitiy flags of the memory */
-       u32 maximum_order_supported;  /**< log2 supported size */
-       u32 identifier;               /* mali_memory_cache_settings cache_settings; */
-       struct _mali_mem_info * next; /**< Next List Link */
-} _mali_mem_info;
-
 /** @} */ /* end group _mali_uk_core */
 
 
@@ -283,7 +173,7 @@ typedef enum _maligp_job_suspended_response_code {
 } _maligp_job_suspended_response_code;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 cookie;                     /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
        _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
        u32 arguments[2];               /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
@@ -294,25 +184,18 @@ typedef struct {
 /** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
  * @{ */
 
-/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
-typedef enum {
-       _MALI_UK_START_JOB_STARTED,                         /**< Job started */
-       _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE           /**< Job could not be started at this time. Try starting the job again */
-} _mali_uk_start_job_status;
-
 /** @brief Status indicating the result of the execution of a Vertex or Fragment processor job  */
-
 typedef enum {
-       _MALI_UK_JOB_STATUS_END_SUCCESS         = 1<<(16+0),
-       _MALI_UK_JOB_STATUS_END_OOM             = 1<<(16+1),
-       _MALI_UK_JOB_STATUS_END_ABORT           = 1<<(16+2),
-       _MALI_UK_JOB_STATUS_END_TIMEOUT_SW      = 1<<(16+3),
-       _MALI_UK_JOB_STATUS_END_HANG            = 1<<(16+4),
-       _MALI_UK_JOB_STATUS_END_SEG_FAULT       = 1<<(16+5),
-       _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB     = 1<<(16+6),
-       _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR     = 1<<(16+7),
-       _MALI_UK_JOB_STATUS_END_SHUTDOWN        = 1<<(16+8),
-       _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+       _MALI_UK_JOB_STATUS_END_SUCCESS         = 1 << (16 + 0),
+       _MALI_UK_JOB_STATUS_END_OOM             = 1 << (16 + 1),
+       _MALI_UK_JOB_STATUS_END_ABORT           = 1 << (16 + 2),
+       _MALI_UK_JOB_STATUS_END_TIMEOUT_SW      = 1 << (16 + 3),
+       _MALI_UK_JOB_STATUS_END_HANG            = 1 << (16 + 4),
+       _MALI_UK_JOB_STATUS_END_SEG_FAULT       = 1 << (16 + 5),
+       _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB     = 1 << (16 + 6),
+       _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR     = 1 << (16 + 7),
+       _MALI_UK_JOB_STATUS_END_SHUTDOWN        = 1 << (16 + 8),
+       _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9)
 } _mali_uk_job_status;
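Since each end-status above occupies its own bit at or above bit 16, a finished-job status maps to a diagnostic string with a plain switch. A minimal sketch (the helper name is illustrative, not from the driver):

static const char *job_status_str(_mali_uk_job_status status)
{
	switch (status) {
	case _MALI_UK_JOB_STATUS_END_SUCCESS:
		return "success";
	case _MALI_UK_JOB_STATUS_END_OOM:
		return "out of memory";
	case _MALI_UK_JOB_STATUS_END_TIMEOUT_SW:
		return "software timeout";
	default:
		return "aborted or failed";
	}
}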
 
 #define MALIGP2_NUM_REGS_FRAME (6)
@@ -371,8 +254,8 @@ typedef enum {
  *
  */
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       u32 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;                   /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
        u32 priority;                       /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
        u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
@@ -381,7 +264,7 @@ typedef struct {
        u32 frame_builder_id;               /**< [in] id of the originating frame builder */
        u32 flush_id;                       /**< [in] flush id within the originating frame builder */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
-       u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on gp timeline for this job will be written */
+       u64 timeline_point_ptr;            /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
 } _mali_uk_gp_start_job_s;
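Note that every pointer-carrying field above is now a u64 rather than a native pointer, so a 64-bit kernel can serve 32-bit user space through a single ABI. User space packs pointers with an explicit double cast; a hedged sketch (uk_ptr is a hypothetical helper, not part of this header):

#include <stdint.h>

/* Pack a native pointer into a u64 uk-field (zero-extends on 32-bit). */
static inline uint64_t uk_ptr(void *p)
{
	return (uint64_t)(uintptr_t)p;
}

/* e.g. job.timeline_point_ptr = uk_ptr(&timeline_point); */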
 
 #define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
@@ -391,7 +274,7 @@ typedef struct {
 /** @} */ /* end group _mali_uk_gpstartjob_s */
 
 typedef struct {
-       u32 user_job_ptr;               /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;               /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;     /**< [out] status of finished job */
        u32 heap_current_addr;          /**< [out] value of the GP PLB PL heap start address register */
        u32 perf_counter0;              /**< [out] value of performance counter 0 (see ARM DDI0415A) */
@@ -399,7 +282,7 @@ typedef struct {
 } _mali_uk_gp_job_finished_s;
 
 typedef struct {
-       u32 user_job_ptr;                    /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;                    /**< [out] identifier for the job in user space */
        u32 cookie;                          /**< [out] identifier for the core in kernel space on which the job stalled */
 } _mali_uk_gp_job_suspended_s;
 
@@ -466,8 +349,8 @@ typedef struct {
  *
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 user_job_ptr;               /**< [in] identifier for the job in user space */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job_ptr;               /**< [in] identifier for the job in user space */
        u32 priority;                   /**< [in] job priority. A lower number means higher priority */
        u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS];         /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
        u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
@@ -476,7 +359,7 @@ typedef struct {
        u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
        u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
        u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
-       u32 num_cores;                      /**< [in] Number of cores to set up (valid range: 1-4) */
+       u32 num_cores;                      /**< [in] Number of cores to set up (valid range: 1-8 on Mali-450, 1-4 on Mali-400) */
        u32 perf_counter_flag;              /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
        u32 perf_counter_src0;              /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
        u32 perf_counter_src1;              /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
@@ -487,21 +370,21 @@ typedef struct {
        u32 tilesy;                         /**< [in] number of tiles in y direction (needed for reading the heatmap memory) */
        u32 heatmap_mem;                    /**< [in] memory address to store counter values per tile (aka heatmap) */
        u32 num_memory_cookies;             /**< [in] number of memory cookies attached to job */
-       u32 *memory_cookies;                /**< [in] memory cookies attached to job  */
+       u64 memory_cookies;               /**< [in] pointer to array of u32 memory cookies attached to job */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
-       u32 *timeline_point_ptr;            /**< [in,out] pointer to location where point on pp timeline for this job will be written */
+       u64 timeline_point_ptr;           /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
 } _mali_uk_pp_start_job_s;
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       _mali_uk_gp_start_job_s *gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
-       _mali_uk_pp_start_job_s *pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+       u64 ctx;       /**< [in,out] user-kernel context (trashed on output) */
+       u64 gp_args;   /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+       u64 pp_args;   /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
 } _mali_uk_pp_and_gp_start_job_s;
 
 /** @} */ /* end group _mali_uk_ppstartjob_s */
 
 typedef struct {
-       u32 user_job_ptr;                          /**< [out] identifier for the job in user space */
+       u64 user_job_ptr;                          /**< [out] identifier for the job in user space */
        _mali_uk_job_status status;                /**< [out] status of finished job */
        u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of perfomance counter 0 (see ARM DDI0415A), one for each sub job */
        u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS];  /**< [out] value of perfomance counter 1 (see ARM DDI0415A), one for each sub job */
@@ -525,7 +408,7 @@ typedef enum {
 } _mali_uk_pp_job_wbx_flag;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 fb_id;                      /**< [in] Frame builder ID of job to disable WB units for */
        u32 wb0_memory;
        u32 wb1_memory;
@@ -539,25 +422,37 @@ typedef struct {
  * @{ */
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
-       u32 type;                           /**< [in] type of soft job */
-       u32 user_job;                       /**< [in] identifier for the job in user space */
-       u32 *job_id_ptr;                    /**< [in,out] pointer to location where job id will be written */
+       u64 ctx;                            /**< [in,out] user-kernel context (trashed on output) */
+       u64 user_job;                       /**< [in] identifier for the job in user space */
+       u64 job_id_ptr;                     /**< [in,out] pointer to location of u32 where job id will be written */
        _mali_uk_fence_t fence;             /**< [in] fence this job must wait on */
        u32 point;                          /**< [out] point on soft timeline for this job */
+       u32 type;                           /**< [in] type of soft job */
 } _mali_uk_soft_job_start_s;
 
 typedef struct {
-       u32 user_job;                       /**< [out] identifier for the job in user space */
+       u64 user_job;                       /**< [out] identifier for the job in user space */
 } _mali_uk_soft_job_activated_s;
 
 typedef struct {
-       void *ctx;                          /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                          /**< [in,out] user-kernel context (trashed on output) */
        u32 job_id;                         /**< [in] id for soft job */
 } _mali_uk_soft_job_signal_s;
 
 /** @} */ /* end group _mali_uk_soft_job */
 
+typedef struct {
+       u32 counter_id;
+       u32 key;
+       int enable;
+} _mali_uk_annotate_profiling_mem_counter_s;
+
+typedef struct {
+       u32 sampling_rate;
+       int enable;
+} _mali_uk_annotate_profiling_enable_s;
+
+
 /** @addtogroup _mali_uk_core U/K Core
  * @{ */
 
@@ -577,21 +472,24 @@ typedef struct {
 typedef enum {
        /** core notifications */
 
-       _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS =  (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
-       _MALI_NOTIFICATION_APPLICATION_QUIT =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
-       _MALI_NOTIFICATION_SETTINGS_CHANGED =           (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
-       _MALI_NOTIFICATION_SOFT_ACTIVATED =             (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
+       _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+       _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+       _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+       _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
 
        /** Fragment Processor notifications */
 
-       _MALI_NOTIFICATION_PP_FINISHED =                (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
-       _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE =         (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+       _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
 
        /** Vertex Processor notifications */
 
-       _MALI_NOTIFICATION_GP_FINISHED =                (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
-       _MALI_NOTIFICATION_GP_STALLED =                 (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+       _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
 
+       /** Profiling notifications */
+       _MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10,
+       _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20,
 } _mali_uk_notification_type;
 
 /** to assist in splitting up 32-bit notification value in subsystem and id value */
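The subsystem thus sits in the high 16 bits and the id in the low 16 bits; the actual helper macros are outside this hunk, but the split would look along these lines (names illustrative):

#define UK_NOTIFICATION_SUBSYSTEM(v) (((v) >> 16) & 0xFFFF) /* illustrative */
#define UK_NOTIFICATION_ID(v)        ((v) & 0xFFFF)         /* illustrative */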
@@ -623,19 +521,19 @@ typedef enum {
 /* See mali_user_settings_db.c */
 extern const char *_mali_uk_user_setting_descriptions[];
 #define _MALI_UK_USER_SETTING_DESCRIPTIONS \
-{                                           \
-       "sw_events_enable",                 \
-       "colorbuffer_capture_enable",       \
-       "depthbuffer_capture_enable",       \
-       "stencilbuffer_capture_enable",     \
-       "per_tile_counters_enable",         \
-       "buffer_capture_compositor",        \
-       "buffer_capture_window",            \
-       "buffer_capture_other",             \
-       "buffer_capture_n_frames",          \
-       "buffer_capture_resize_factor",     \
-       "sw_counters_enable",               \
-};
+       {                                           \
+               "sw_events_enable",                 \
+               "colorbuffer_capture_enable",       \
+               "depthbuffer_capture_enable",       \
+               "stencilbuffer_capture_enable",     \
+               "per_tile_counters_enable",         \
+               "buffer_capture_compositor",        \
+               "buffer_capture_window",            \
+               "buffer_capture_other",             \
+               "buffer_capture_n_frames",          \
+               "buffer_capture_resize_factor",     \
+               "sw_counters_enable",               \
+       };
 
 /** @brief struct to hold the value to a particular setting as seen in the kernel space
  */
@@ -688,7 +586,7 @@ typedef struct {
  * when the polygon list builder unit has run out of memory.
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [out] Type of notification available */
        union {
                _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
@@ -696,6 +594,8 @@ typedef struct {
                _mali_uk_pp_job_finished_s  pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
                _mali_uk_settings_changed_s setting_changed; /**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
                _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
+               _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter;
+               _mali_uk_annotate_profiling_enable_s profiling_enable;
        } data;
 } _mali_uk_wait_for_notification_s;
 
@@ -705,7 +605,7 @@ typedef struct {
  * This is used to send a quit message to the callback thread.
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_notification_type type; /**< [in] Type of notification to post */
 } _mali_uk_post_notification_s;
 
@@ -739,7 +639,7 @@ typedef struct {
  * The 16bit integer is stored twice in a 32bit integer
  * For example, for version 1 the value would be 0x00010001
  */
-#define _MALI_API_VERSION 401
+#define _MALI_API_VERSION 800
 #define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
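The body of _MAKE_VERSION_ID() is outside this hunk; given the stated encoding (the 16-bit version stored twice in a 32-bit value) it presumably expands along these lines:

#define _MAKE_VERSION_ID(x) (((x) << 16) | (x)) /* assumed; _MAKE_VERSION_ID(800) == 0x03200320 */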
 
 /**
@@ -764,10 +664,31 @@ typedef u32 _mali_uk_api_version;
  * of the interface may be backwards compatible.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
        int compatible;                 /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
 } _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. The _MALI_UK_API_VERSION macro defines the current
+ * version of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+       u64 ctx;                        /**< [in,out] user-kernel context (trashed on output) */
+       _mali_uk_api_version version;   /**< [in,out] API version of user-side interface. */
+       int compatible;                 /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
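A minimal user-side sketch of the handshake, assuming some ioctl wrapper mali_uk_call() (hypothetical) that dispatches the _v2 call:

_mali_uk_get_api_version_v2_s args = { 0 };

args.version = _MALI_UK_API_VERSION;  /* user-side version, _MAKE_VERSION_ID encoded */
if (0 == mali_uk_call(&args) && args.compatible) {
	/* args.version now holds the kernel-side API version; safe to proceed */
}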
+
 /** @} */ /* end group _mali_uk_getapiversion_s */
 
 /** @defgroup _mali_uk_get_user_settings_s Get user space settings */
@@ -781,21 +702,21 @@ typedef struct {
  *
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
 } _mali_uk_get_user_settings_s;
 
 /** @brief struct to hold the value of a particular setting from the user space within a given context
  */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_user_setting_t setting; /**< [in] setting to get */
        u32 value;                       /**< [out] value of setting */
 } _mali_uk_get_user_setting_s;
 
 /** @brief Arguments for _mali_ukk_request_high_priority() */
 typedef struct {
-       void *ctx;                       /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                       /**< [in,out] user-kernel context (trashed on output) */
 } _mali_uk_request_high_priority_s;
 
 /** @} */ /* end group _mali_uk_core */
@@ -804,113 +725,137 @@ typedef struct {
 /** @defgroup _mali_uk_memory U/K Memory
  * @{ */
 
-/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
-#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+#define _MALI_MEMORY_ALLOCATE_RESIZEABLE  (1<<4) /* buffer can trim down or grow */
+#define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /* not mapped to GPU at allocation; bind must be called later */
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 phys_addr;                  /**< [in] physical address */
-       u32 size;                       /**< [in] size */
-       u32 mali_address;               /**< [in] mali address to map the physical memory to */
-       u32 rights;                     /**< [in] rights necessary for accessing memory */
-       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
-       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
-} _mali_uk_map_external_mem_s;
+       u64 ctx;                                          /**< [in,out] user-kernel context (trashed on output) */
+       u32 gpu_vaddr;                                    /**< [in] GPU virtual address */
+       u32 vsize;                                        /**< [in] virtual size of the allocation */
+       u32 psize;                                        /**< [in] physical size of the allocation */
+       u32 flags;
+       u64 backend_handle;                               /**< [out] backend handle */
+       struct {
+               /* buffer types*/
+               /* CPU read/write info*/
+       } buffer_info;
+} _mali_uk_alloc_mem_s;
 
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
-} _mali_uk_unmap_external_mem_s;
 
-/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by memory descriptor */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 mem_fd;                     /**< [in] Memory descriptor */
-       u32 size;                       /**< [in] size */
-       u32 mali_address;               /**< [in] mali address to map the physical memory to */
-       u32 rights;                     /**< [in] rights necessary for accessing memory */
-       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
-       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
-} _mali_uk_attach_dma_buf_s;
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 gpu_vaddr;                /**< [in] use as handle to free allocation */
+       u32 free_pages_nr;                                /**< [out] number of pages freed */
+} _mali_uk_free_mem_s;
+
+
+#define _MALI_MEMORY_BIND_BACKEND_UMP             (1<<8)
+#define _MALI_MEMORY_BIND_BACKEND_DMA_BUF         (1<<9)
+#define _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY     (1<<10)
+#define _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY (1<<11)
+#define _MALI_MEMORY_BIND_BACKEND_EXT_COW         (1<<12)
+#define _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION (1<<13)
+
+
+#define _MALI_MEMORY_BIND_BACKEND_MASK (_MALI_MEMORY_BIND_BACKEND_UMP| \
+                                       _MALI_MEMORY_BIND_BACKEND_DMA_BUF |\
+                                       _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY |\
+                                       _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY |\
+                                       _MALI_MEMORY_BIND_BACKEND_EXT_COW |\
+                                       _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION)
+
+
+#define _MALI_MEMORY_GPU_READ_ALLOCATE            (1<<16)
+
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 mem_fd;                     /**< [in] Memory descriptor */
-       u32 size;                       /**< [out] size */
-} _mali_uk_dma_buf_get_size_s;
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 vaddr;                                      /**< [in] mali address to map the physical memory to */
+       u32 size;                                       /**< [in] size */
+       u32 flags;                                      /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+       u32 padding;                                    /**< padding for 32/64-bit struct alignment */
+       union {
+               struct {
+                       u32 secure_id;                  /**< [in] secure id */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_ump;
+               struct {
+                       u32 mem_fd;                     /**< [in] Memory descriptor */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_dma_buf;
+               struct {
+                       /**/
+               } bind_mali_memory;
+               struct {
+                       u32 phys_addr;                  /**< [in] physical address */
+                       u32 rights;                     /**< [in] rights necessary for accessing memory */
+                       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+               } bind_ext_memory;
+       } mem_union;
+} _mali_uk_bind_mem_s;
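For example, binding an imported dma-buf might fill the struct as below (gpu_va, buf_size and dmabuf_fd are assumed caller-provided values; the ioctl plumbing is elided):

_mali_uk_bind_mem_s bind = { 0 };

bind.vaddr = gpu_va;                            /* Mali virtual address to map at */
bind.size  = buf_size;
bind.flags = _MALI_MEMORY_BIND_BACKEND_DMA_BUF;
bind.mem_union.bind_dma_buf.mem_fd = dmabuf_fd;
bind.mem_union.bind_dma_buf.rights = 0;
bind.mem_union.bind_dma_buf.flags  = _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE;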
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
-} _mali_uk_release_dma_buf_s;
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 flags;                                      /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+       u32 vaddr;                                      /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_unbind_mem_s;
 
-/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;                  /**< [in] secure id */
-       u32 size;                       /**< [in] size */
-       u32 mali_address;               /**< [in] mali address to map the physical memory to */
-       u32 rights;                     /**< [in] rights necessary for accessing memory */
-       u32 flags;                      /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
-       u32 cookie;                     /**< [out] identifier for mapped memory object in kernel space  */
-} _mali_uk_attach_ump_mem_s;
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 target_handle;                              /**< [in] handle of the allocation to COW */
+       u32 target_offset;                              /**< [in] offset into the target allocation to COW (PAGE_SIZE aligned; supports memory allocated from a memory bank) */
+       u32 target_size;                                /**< [in] size in bytes of the target region to COW (PAGE_SIZE aligned) */
+       u32 range_start;                                /**< [in] start offset of the range to reallocate, from the start of the allocation (PAGE_SIZE aligned) */
+       u32 range_size;                                 /**< [in] size of the range to reallocate (PAGE_SIZE aligned) */
+       u32 vaddr;                                      /**< [in] mali address for the new allocation */
+       u32 backend_handle;                             /**< [out] backend handle */
+       u32 flags;
+} _mali_uk_cow_mem_s;
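As a sketch, COW-ing a whole allocation into a fresh Mali address could look like this (src_handle, alloc_size and new_gpu_va are assumed, PAGE_SIZE-aligned inputs):

_mali_uk_cow_mem_s cow = { 0 };

cow.target_handle = src_handle;    /* backend handle of the source allocation */
cow.target_offset = 0;
cow.target_size   = alloc_size;
cow.range_start   = 0;
cow.range_size    = alloc_size;    /* copy-on-write the entire allocation */
cow.vaddr         = new_gpu_va;    /* Mali address for the new allocation */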
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 cookie;                     /**< [in] identifier for mapped memory object in kernel space  */
-} _mali_uk_release_ump_mem_s;
+       u64 ctx;                                        /**< [in,out] user-kernel context (trashed on output) */
+       u32 range_start;                                /**< [in] start offset of the range to reallocate, from the start of the allocation */
+       u32 size;                                       /**< [in] size to reallocate */
+       u32 vaddr;                                      /**< [in] mali address for the new allocation */
+       s32 change_pages_nr;                            /**< [out] page-count change from the COW operation */
+} _mali_uk_cow_modify_range_s;
+
 
-/** @brief Arguments for _mali_ukk_va_to_mali_pa()
- *
- * if size is zero or not a multiple of the system's page size, it will be
- * rounded up to the next multiple of the page size. This will occur before
- * any other use of the size parameter.
- *
- * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
- * boundary.
- *
- * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
- * contiguity.
- *
- * The implementor will check that the entire physical range is allowed to be mapped
- * into user-space.
- *
- * Failure will occur if either of the above are not satisfied.
- *
- * Otherwise, the physical base address of the range is returned through pa,
- * va is updated to be page aligned, and size is updated to be a non-zero
- * multiple of the system's pagesize.
- */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       void *va;                       /**< [in,out] Virtual address of the start of the range */
-       u32 pa;                         /**< [out] Physical base address of the range */
-       u32 size;                       /**< [in,out] Size of the range, in bytes. */
-} _mali_uk_va_to_mali_pa_s;
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u32 mem_fd;                     /**< [in] Memory descriptor */
+       u32 size;                       /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+/** Flag for the bind_ump, bind_dma_buf and bind_ext_memory members of _mali_uk_bind_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
 
 /**
  * @brief Arguments for _mali_uk[uk]_mem_write_safe()
  */
 typedef struct {
-       void *ctx;        /**< [in,out] user-kernel context (trashed on output) */
-       const void *src;  /**< [in]     Pointer to source data */
-       void *dest;       /**< [in]     Destination Mali buffer */
-       u32 size;         /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+       u64 ctx;  /**< [in,out] user-kernel context (trashed on output) */
+       u64 src;  /**< [in] Pointer to source data */
+       u64 dest; /**< [in] Destination Mali buffer */
+       u32 size;   /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
 } _mali_uk_mem_write_safe_s;
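Since size is [in,out], callers pass the requested byte count and read back how much was actually copied. A hedged sketch (cpu_buf, mali_va and len are assumed locals; the write-safe call itself is elided):

_mali_uk_mem_write_safe_s wr = { 0 };

wr.src  = (u64)(uintptr_t)cpu_buf; /* source pointer packed into u64 */
wr.dest = mali_va;                 /* destination Mali buffer address */
wr.size = len;                     /* in: bytes requested */
/* ... issue the write-safe call; afterwards wr.size holds bytes written */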
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 size;                       /**< [out] size of MMU page table information (registers + page tables) */
 } _mali_uk_query_mmu_page_table_dump_size_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 size;                       /**< [in] size of buffer to receive mmu page table information */
-       void *buffer;                   /**< [in,out] buffer to receive mmu page table information */
+       u64 buffer;                   /**< [in,out] buffer to receive mmu page table information */
        u32 register_writes_size;       /**< [out] size of MMU register dump */
-       u32 *register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
+       u64 register_writes;           /**< [out] pointer within buffer where MMU register dump is stored */
        u32 page_table_dump_size;       /**< [out] size of MMU page table dump */
-       u32 *page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
+       u64 page_table_dump;           /**< [out] pointer within buffer where MMU page table dump is stored */
 } _mali_uk_dump_mmu_page_table_s;
 
 /** @} */ /* end group _mali_uk_memory */
@@ -926,7 +871,7 @@ typedef struct {
  * will contain the number of Fragment Processor cores in the system.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 number_of_total_cores;      /**< [out] Total number of Fragment Processor cores in the system */
        u32 number_of_enabled_cores;    /**< [out] Number of enabled Fragment Processor cores */
 } _mali_uk_get_pp_number_of_cores_s;
@@ -938,8 +883,9 @@ typedef struct {
  * the version that all Fragment Processor cores are compatible with.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version  */
+       u32 padding;
 } _mali_uk_get_pp_core_version_s;
 
 /** @} */ /* end group _mali_uk_pp */
@@ -955,7 +901,7 @@ typedef struct {
  * will contain the number of Vertex Processor cores in the system.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 number_of_cores;            /**< [out] number of Vertex Processor cores in the system */
 } _mali_uk_get_gp_number_of_cores_s;
 
@@ -966,39 +912,24 @@ typedef struct {
  * the version that all Vertex Processor cores are compatible with.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_core_version version;     /**< [out] version returned from core, see \ref _mali_core_version */
 } _mali_uk_get_gp_core_version_s;
 
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 limit;                      /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
-} _mali_uk_profiling_start_s;
+/** @} */ /* end group _mali_uk_gp */
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 event_id;                   /**< [in] event id to register (see  enum mali_profiling_events for values) */
        u32 data[5];                    /**< [in] event specific data */
 } _mali_uk_profiling_add_event_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 count;                      /**< [out] The number of events sampled */
-} _mali_uk_profiling_stop_s;
-
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32 index;                      /**< [in] which index to get (starting at zero) */
-       u64 timestamp;                  /**< [out] timestamp of event */
-       u32 event_id;                   /**< [out] event id of event (see  enum mali_profiling_events for values) */
-       u32 data[5];                    /**< [out] event specific data */
-} _mali_uk_profiling_get_event_s;
-
-typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-} _mali_uk_profiling_clear_s;
-
-/** @} */ /* end group _mali_uk_gp */
+       u64 ctx;                     /**< [in,out] user-kernel context (trashed on output) */
+       u32 memory_usage;              /**< [out] total memory usage */
+       u32 vaddr;                                      /**< [in] mali address for the COW allocation */
+       s32 change_pages_nr;                            /**< [out] page-count change from the COW operation */
+} _mali_uk_profiling_memory_usage_get_s;
 
 
 /** @addtogroup _mali_uk_memory U/K Memory
@@ -1031,14 +962,11 @@ typedef struct {
  * implementation of the U/K interface. Its value must be zero.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
        u32 size;                       /**< [in] Size of the requested mapping */
        u32 phys_addr;                  /**< [in] Physical address - could be offset, depending on caller+callee convention */
-       u32 cookie;                     /**< [out] Returns a cookie for use in munmap calls */
-       void *uku_private;              /**< [in] User-side Private word used by U/K interface */
-       void *ukk_private;              /**< [in] Kernel-side Private word used by U/K interface */
-       mali_memory_cache_settings cache_settings; /**< [in] Option to set special cache flags, tuning L2 efficency */
+       mali_bool writeable;
 } _mali_uk_mem_mmap_s;
 
 /** @brief Arguments to _mali_ukk_mem_munmap()
@@ -1052,10 +980,9 @@ typedef struct {
  * originally obtained range, or to unmap more than was originally obtained.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        void *mapping;                  /**< [in] The mapping returned from mmap call */
        u32 size;                       /**< [in] The size passed to mmap call */
-       u32 cookie;                     /**< [in] Cookie from mmap call */
 } _mali_uk_mem_munmap_s;
 /** @} */ /* end group _mali_uk_memory */
 
@@ -1076,7 +1003,7 @@ typedef enum _mali_uk_vsync_event {
  *
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_vsync_event event;     /**< [in] VSYNCH event type */
 } _mali_uk_vsync_event_report_s;
 
@@ -1090,9 +1017,9 @@ typedef struct {
  * Values recorded for each of the software counters during a single renderpass.
  */
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       u32* counters;                  /**< [in] The array of counter values */
-       u32  num_counters;              /**< [in] The number of elements in counters array */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 counters;                  /**< [in] The array of u32 counter values */
+       u32 num_counters;              /**< [in] The number of elements in counters array */
 } _mali_uk_sw_counters_report_s;
 
 /** @} */ /* end group _mali_uk_sw_counters_report */
@@ -1101,20 +1028,20 @@ typedef struct {
  * @{ */
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        u32 timeline;                   /**< [in] timeline id */
        u32 point;                      /**< [out] latest point on timeline */
 } _mali_uk_timeline_get_latest_point_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_fence_t fence;         /**< [in] fence */
        u32 timeout;                    /**< [in] timeout (0 for no wait, -1 for blocking) */
        u32 status;                     /**< [out] status of fence (1 if signaled, 0 if timeout) */
 } _mali_uk_timeline_wait_s;
 
 typedef struct {
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
+       u64 ctx;                      /**< [in,out] user-kernel context (trashed on output) */
        _mali_uk_fence_t fence;         /**< [in] mali fence to create linux sync fence from */
        s32 sync_fd;                    /**< [out] file descriptor for new linux sync fence */
 } _mali_uk_timeline_create_sync_fence_s;
@@ -1125,6 +1052,19 @@ typedef struct {
 
 /** @} */ /* end group uddapi */
 
+typedef struct {
+       u64 ctx;                 /**< [in,out] user-kernel context (trashed on output) */
+       s32 stream_fd;   /**< [in] The profiling kernel base stream fd handle */
+} _mali_uk_profiling_stream_fd_get_s;
+
+typedef struct {
+       u64 ctx;        /**< [in,out] user-kernel context (trashed on output) */
+       u64 control_packet_data; /**< [in] the control packet data for control settings */
+       u32 control_packet_size;  /**< [in] The control packet size */
+       u64 response_packet_data; /**< [out] The response packet data */
+       u32 response_packet_size; /**< [in,out] The response packet size */
+} _mali_uk_profiling_control_set_s;
+
 #ifdef __cplusplus
 }
 #endif
index e9e5e55a08222775d2e31d9b449e8378edee39af..2b9027efb7179a9c560646949a719615a9f0aff6 100644 (file)
@@ -1,12 +1,3 @@
-/*
- * Copyright (C) 2010 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
 
 /**
  * @file mali_kernel_license.h
index d3760d8056e506c74bd3d628fb468a6d2d72452c..4c9d49669a79d192e10b7f2eac089ae0081dcf5d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2010, 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 68a28f41aacc1c855e05144b4a1c786e8667d7ae..df033b0e8c8314df4c5b916a08eaaa5c12aa7620 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include <linux/module.h>
 #include <linux/mali/mali_utgard.h>
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
 
 void mali_dev_pause(void)
 {
-       mali_gp_scheduler_suspend();
-       mali_pp_scheduler_suspend();
-       mali_group_power_off(MALI_FALSE);
-       mali_l2_cache_pause_all(MALI_TRUE);
+       /*
+        * Deactivate all groups to prevent the hardware from being touched
+        * while the mali device is paused
+        */
+       mali_pm_os_suspend(MALI_FALSE);
 }
 
 EXPORT_SYMBOL(mali_dev_pause);
 
 void mali_dev_resume(void)
 {
-       mali_l2_cache_pause_all(MALI_FALSE);
-       mali_gp_scheduler_resume();
-       mali_pp_scheduler_resume();
+       mali_pm_os_resume();
 }
 
 EXPORT_SYMBOL(mali_dev_resume);
index bc2685d2388e36e7f7e99c17425b974812d94d09..efe417d3d8a4bc1d721e0e31aca8edf87a2d6fb4 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -22,6 +22,9 @@
 #include "mali_kernel_license.h"
 #include <linux/platform_device.h>
 #include <linux/miscdevice.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+
 #include <linux/mali/mali_utgard.h>
 #include "mali_kernel_common.h"
 #include "mali_session.h"
 #include "mali_kernel_license.h"
 #include "mali_memory.h"
 #include "mali_memory_dma_buf.h"
+#include "mali_memory_manager.h"
+#include "mt_smi.h"
+#include "platform_pmm.h"
+
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
 #include "mali_profiling_internal.h"
 #endif
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+static int is_first_resume = 1;
+/* Store the clock and voltage for boot/insmod and mali_resume */
+static struct mali_gpu_clk_item mali_gpu_clk[2];
+#endif
 
 /* Streamline support for the Mali driver */
 #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
 /* Ask Linux to create the tracepoints */
 #define CREATE_TRACE_POINTS
 #include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters);
 #endif /* CONFIG_TRACEPOINTS */
 
 /* from the __malidrv_build_info.c file that is generated during build */
@@ -88,7 +106,7 @@ extern int mali_max_pp_cores_group_2;
 module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
 MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
 
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
 /** the max fps the same as display vsync default 60, can set by module insert parameter */
 extern int mali_max_system_fps;
 module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
@@ -145,12 +163,17 @@ static int mali_driver_runtime_idle(struct device *dev);
 #endif
 
 #if defined(MALI_FAKE_PLATFORM_DEVICE)
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
 extern int mali_platform_device_register(void);
 extern int mali_platform_device_unregister(void);
 #endif
+#endif
 
 /* Linux power management operations provided by the Mali device driver */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
 struct pm_ext_ops mali_dev_ext_pm_ops = {
        .base =
        {
@@ -175,11 +198,23 @@ static const struct dev_pm_ops mali_dev_pm_ops = {
 };
 #endif
 
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+       {.compatible = "arm,mali-300"},
+       {.compatible = "arm,mali-400"},
+       {.compatible = "arm,mali-450"},
+       {.compatible = "arm,mali-470"},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
+#endif
+
 /* The Mali device driver struct */
 static struct platform_driver mali_platform_driver = {
        .probe  = mali_probe,
        .remove = mali_remove,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
        .pm = &mali_dev_ext_pm_ops,
 #endif
        .driver =
@@ -187,8 +222,11 @@ static struct platform_driver mali_platform_driver = {
                .name   = MALI_GPU_NAME_UTGARD,
                .owner  = THIS_MODULE,
                .bus = &platform_bus_type,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
                .pm = &mali_dev_pm_ops,
+#endif
+#ifdef CONFIG_MALI_DT
+               .of_match_table = of_match_ptr(base_dt_ids),
 #endif
        },
 };
@@ -203,10 +241,10 @@ struct file_operations mali_fops = {
 #else
        .ioctl = mali_ioctl,
 #endif
+       .compat_ioctl = mali_ioctl,
        .mmap = mali_mmap
 };
 
-
 #if MALI_ENABLE_CPU_CYCLES
 void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
 {
@@ -219,30 +257,31 @@ void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
 
 
        /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
-       write_value = 1<<0; /* Bit 0 set. Enable counters */
+       write_value = 1 << 0; /* Bit 0 set. Enable counters */
        if (reset) {
-               write_value |= 1<<1; /* Reset event counters */
-               write_value |= 1<<2; /* Reset cycle counter  */
+               write_value |= 1 << 1; /* Reset event counters */
+               write_value |= 1 << 2; /* Reset cycle counter  */
        }
        if (enable_divide_by_64) {
-               write_value |= 1<<3; /* Enable the Clock divider by 64 */
+               write_value |= 1 << 3; /* Enable the Clock divider by 64 */
        }
-       write_value |= 1<<4; /* Export enable. Not needed */
-       asm volatile ("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value ));
+       write_value |= 1 << 4; /* Export enable. Not needed */
+       asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value));
 
        /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
-       asm volatile ("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+       asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
 
 
        /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1" */
        /* User mode access to the Performance Monitors enabled. */
        /* Lets User space read cpu clock cycles */
-       asm volatile( "mcr p15, 0, %0, c9, c14, 0" :: "r"(1) );
+       asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
 }
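The matching read side, mali_get_cpu_cyclecount(), lives elsewhere in the driver; on ARMv7 a cycle-count read is typically a PMCCNTR access along these lines (sketch with an illustrative name):

static inline unsigned int read_cycle_counter(void) /* illustrative */
{
	unsigned int value;
	/* PMCCNTR, Cycle Count Register: p15, 0, c9, c13, 0 */
	asm volatile("MRC p15, 0, %0, c9, c13, 0" : "=r"(value));
	return value;
}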
 
 /** A timer function that configures the cycle clock counter on current CPU.
-       The function \a mali_init_cpu_time_counters_on_all_cpus sets up this function
-       to trigger on all Cpus during module load. */
+ * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this
+ * function to trigger on all CPUs during module load.
+ */
 static void mali_init_cpu_clock_timer_func(unsigned long data)
 {
        int reset_counters, enable_divide_clock_counter_by_64;
@@ -252,7 +291,7 @@ static void mali_init_cpu_clock_timer_func(unsigned long data)
 
        MALI_IGNORE(data);
 
-       reset_counters= 1;
+       reset_counters = 1;
        enable_divide_clock_counter_by_64 = 0;
        mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
 
@@ -263,7 +302,8 @@ static void mali_init_cpu_clock_timer_func(unsigned long data)
 }
 
 /** A timer function for storing the current time on all cpus.
-    Used for checking if the clocks have similar values or if they are drifting. */
+ * Used for checking if the clocks have similar values or if they are drifting.
+ */
 static void mali_print_cpu_clock_timer_func(unsigned long data)
 {
        int current_cpu = raw_smp_processor_id();
@@ -271,14 +311,15 @@ static void mali_print_cpu_clock_timer_func(unsigned long data)
 
        MALI_IGNORE(data);
        sample0 = mali_get_cpu_cyclecount();
-       if ( current_cpu<8 ) {
+       if (current_cpu < 8) {
                mali_cpu_clock_last_value[current_cpu] = sample0;
        }
 }
 
 /** Init the performance registers on all CPUs to count clock cycles.
-       For init \a print_only should be 0.
-    If \a print_only is 1, it will intead print the current clock value of all CPUs.*/
+ * For init \a print_only should be 0.
+ * If \a print_only is 1, it will instead print the current clock value of all CPUs.
+ */
 void mali_init_cpu_time_counters_on_all_cpus(int print_only)
 {
        int i = 0;
@@ -289,14 +330,14 @@ void mali_init_cpu_time_counters_on_all_cpus(int print_only)
        jiffies_wait = 2;
        jiffies_trigger = jiffies + jiffies_wait;
 
-       for ( i=0 ; i < 8 ; i++ ) {
+       for (i = 0 ; i < 8 ; i++) {
                init_timer(&mali_init_cpu_clock_timers[i]);
                if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
                else            mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
                mali_init_cpu_clock_timers[i].expires = jiffies_trigger ;
        }
        cpu_number = cpumask_first(cpu_online_mask);
-       for ( i=0 ; i < 8 ; i++ ) {
+       for (i = 0 ; i < 8 ; i++) {
                int next_cpu;
                add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
                next_cpu = cpumask_next(cpu_number, cpu_online_mask);
@@ -304,30 +345,29 @@ void mali_init_cpu_time_counters_on_all_cpus(int print_only)
                cpu_number = next_cpu;
        }
 
-       while (jiffies_wait) jiffies_wait= schedule_timeout_uninterruptible(jiffies_wait);
+       while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait);
 
-       for ( i=0 ; i < 8 ; i++ ) {
+       for (i = 0 ; i < 8 ; i++) {
                del_timer_sync(&mali_init_cpu_clock_timers[i]);
        }
 
        if (print_only) {
-               if ( (0==mali_cpu_clock_last_value[2]) &&  (0==mali_cpu_clock_last_value[3]) ) {
+               if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) {
                        /* Diff can be printed if we want to check if the clocks are in sync
                        int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
                        MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
                } else {
-                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3] ));
+                       MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3]));
                }
        }
 }
 #endif
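
The PMUSERENR write above (p15, c9, c14, 0 set to 1) is what lets user space
sample the cycle counter directly. As an illustration only (this helper is not
part of the driver), a user-space read of the ARMv7 cycle counter PMCCNTR
could look like this, assuming the kernel has already run the init code above
on that CPU:

        /* Illustrative user-space helper: read the ARMv7 cycle counter
         * PMCCNTR (p15, c9, c13, 0). Works only after the kernel has set
         * PMUSERENR to 1 and enabled the counter, as
         * mali_init_cpu_time_counters() does above. */
        static inline unsigned int read_pmccntr(void)
        {
                unsigned int value;
                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(value));
                return value;
        }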
 
-
 int mali_module_init(void)
 {
        int err = 0;
 
-       MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+       MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
        MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
        MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
 
@@ -339,12 +379,14 @@ int mali_module_init(void)
 #endif
 
        /* Initialize module wide settings */
-#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
        MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
        err = mali_platform_device_register();
        if (0 != err) {
                return err;
        }
+#endif
 #endif
 
        MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
@@ -353,8 +395,10 @@ int mali_module_init(void)
 
        if (0 != err) {
                MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
-#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
                mali_platform_device_unregister();
+#endif
 #endif
                mali_platform_device = NULL;
                return err;
@@ -368,6 +412,18 @@ int mali_module_init(void)
        }
 #endif
 
+       /* Tracing the current frequency and voltage from boot/insmod */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item() to record current clk info. */
+       mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[0].clock,
+                                     mali_gpu_clk[0].vol / 1000,
+                                     0, 0, 0);
+#endif
+
        MALI_PRINT(("Mali device driver loaded\n"));
 
        return 0; /* Success */
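
The event id passed to _mali_osk_profiling_add_event() here is a single
bit-packed u32. Judging from the decode masks used later in this commit in
profiling_events_show_human_readable() (0x0F000000 for the type, 0x00FF0000
for the channel, 0x0000FFFF for the reason), the packing can be sketched as
below; the helper name is hypothetical:

        /* Hypothetical helper, inferred from the decode masks: type in
         * bits 27:24, channel in bits 23:16, reason in the low 16 bits. */
        static inline u32 make_profiling_event_id(u32 type, u32 channel, u32 reason)
        {
                return (type & 0x0F000000) | (channel & 0x00FF0000) |
                       (reason & 0x0000FFFF);
        }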
@@ -375,20 +431,30 @@ int mali_module_init(void)
 
 void mali_module_exit(void)
 {
-       MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+       MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
 
        MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
 
-#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
-       _mali_internal_profiling_term();
-#endif
-
        platform_driver_unregister(&mali_platform_driver);
 
 #if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
        MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
        mali_platform_device_unregister();
 #endif
+#endif
+
+       /* Tracing the current frequency and voltage from rmmod */
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     0,
+                                     0,
+                                     0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+       _mali_internal_profiling_term();
+#endif
 
        MALI_PRINT(("Mali device driver unloaded\n"));
 }
@@ -398,7 +464,10 @@ static int mali_probe(struct platform_device *pdev)
        int err;
 
        MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
-
+       if (!mtk_mfg_is_ready()) {
+               pr_warn("mfg is not ready, defer Mali probe\n");
+               return -EPROBE_DEFER;
+       }
        if (NULL != mali_platform_device) {
                /* Already connected to a device, return error */
                MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
@@ -407,6 +476,15 @@ static int mali_probe(struct platform_device *pdev)
 
        mali_platform_device = pdev;
 
+#ifdef CONFIG_MALI_DT
+       /* If we use DT to initialize our DDK, we have to prepare some things. */
+       err = mali_platform_device_init(mali_platform_device);
+       if (0 != err) {
+               MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+               return -EFAULT;
+       }
+#endif
+
        if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
                /* Initialize the Mali GPU HW specified by pdev */
                if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
@@ -415,8 +493,10 @@ static int mali_probe(struct platform_device *pdev)
                        if (0 == err) {
                                /* Setup sysfs entries */
                                err = mali_sysfs_register(mali_dev_name);
+
                                if (0 == err) {
                                        MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
                                        return 0;
                                } else {
                                        MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
@@ -443,6 +523,9 @@ static int mali_remove(struct platform_device *pdev)
        mali_miscdevice_unregister();
        mali_terminate_subsystems();
        _mali_osk_wq_term();
+#ifdef CONFIG_MALI_DT
+       mali_platform_device_deinit(mali_platform_device);
+#endif
        mali_platform_device = NULL;
        return 0;
 }
@@ -471,25 +554,76 @@ static void mali_miscdevice_unregister(void)
 
 static int mali_driver_suspend_scheduler(struct device *dev)
 {
-       mali_pm_os_suspend();
+       mali_pm_os_suspend(MALI_TRUE);
+       /* Tracing the frequency and voltage after mali is suspended */
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     0,
+                                     0,
+                                     0, 0, 0);
+       mali_platform_power_mode_change(dev, MALI_POWER_MODE_DEEP_SLEEP);
        return 0;
 }
 
 static int mali_driver_resume_scheduler(struct device *dev)
 {
+       mali_platform_power_mode_change(dev, MALI_POWER_MODE_ON);
+
+       /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item() once to record current clk info. */
+       if (is_first_resume == 1) {
+               mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+               is_first_resume = 0;
+       }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[1].clock,
+                                     mali_gpu_clk[1].vol / 1000,
+                                     0, 0, 0);
+#endif
        mali_pm_os_resume();
        return 0;
 }
 
+
 #ifdef CONFIG_PM_RUNTIME
 static int mali_driver_runtime_suspend(struct device *dev)
 {
-       mali_pm_runtime_suspend();
-       return 0;
+       if (MALI_TRUE == mali_pm_runtime_suspend()) {
+               /* Tracing the frequency and voltage after mali is suspended */
+               _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                             MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                             MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                             0,
+                                             0,
+                                             0, 0, 0);
+
+               return 0;
+       } else {
+               return -EBUSY;
+       }
 }
 
 static int mali_driver_runtime_resume(struct device *dev)
 {
+       /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+       /* Just call mali_get_current_gpu_clk_item() once to record current clk info. */
+       if (is_first_resume == 1) {
+               mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+               is_first_resume = 0;
+       }
+       _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+                                     MALI_PROFILING_EVENT_CHANNEL_GPU |
+                                     MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                                     mali_gpu_clk[1].clock,
+                                     mali_gpu_clk[1].vol / 1000,
+                                     0, 0, 0);
+#endif
+
        mali_pm_runtime_resume();
        return 0;
 }
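
Note the error convention in the runtime hooks: when mali_pm_runtime_suspend()
reports MALI_FALSE, the callback returns -EBUSY, which tells the PM core the
device cannot be suspended right now and keeps it active. Sketched in
isolation (device_is_idle() is a hypothetical readiness check):

        static int example_runtime_suspend(struct device *dev)
        {
                if (!device_is_idle(dev))
                        return -EBUSY; /* PM core keeps the device active */
                return 0;              /* suspend proceeds */
        }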
@@ -503,7 +637,7 @@ static int mali_driver_runtime_idle(struct device *dev)
 
 static int mali_open(struct inode *inode, struct file *filp)
 {
-       struct mali_session_data * session_data;
+       struct mali_session_data *session_data;
        _mali_osk_errcode_t err;
 
        /* input validation */
@@ -520,7 +654,7 @@ static int mali_open(struct inode *inode, struct file *filp)
        filp->f_pos = 0;
 
        /* link in our session data */
-       filp->private_data = (void*)session_data;
+       filp->private_data = (void *)session_data;
 
        return 0;
 }
@@ -541,9 +675,9 @@ static int mali_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-int map_errcode( _mali_osk_errcode_t err )
+int map_errcode(_mali_osk_errcode_t err)
 {
-       switch(err) {
+       switch (err) {
        case _MALI_OSK_ERR_OK :
                return 0;
        case _MALI_OSK_ERR_FAULT:
@@ -592,201 +726,196 @@ static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
                return -ENOTTY;
        }
 
-       switch(cmd) {
+       switch (cmd) {
        case MALI_IOC_WAIT_FOR_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
                err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
                break;
 
+       case MALI_IOC_GET_API_VERSION_V2:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+               err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+               break;
+
        case MALI_IOC_GET_API_VERSION:
                err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
                break;
 
        case MALI_IOC_POST_NOTIFICATION:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
                err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
                break;
 
        case MALI_IOC_GET_USER_SETTINGS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
                err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
                break;
 
        case MALI_IOC_REQUEST_HIGH_PRIORITY:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
                err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
                break;
 
 #if defined(CONFIG_MALI400_PROFILING)
-       case MALI_IOC_PROFILING_START:
-               err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
-               break;
-
        case MALI_IOC_PROFILING_ADD_EVENT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
                err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
                break;
 
-       case MALI_IOC_PROFILING_STOP:
-               err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
-               break;
-
-       case MALI_IOC_PROFILING_GET_EVENT:
-               err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
-               break;
-
-       case MALI_IOC_PROFILING_CLEAR:
-               err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+       case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
+               err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
                break;
 
-       case MALI_IOC_PROFILING_GET_CONFIG:
-               /* Deprecated: still compatible with get_user_settings */
-               err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+       case MALI_IOC_PROFILING_STREAM_FD_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64)));
+               err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg);
                break;
 
-       case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
-               err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+       case MALI_IOC_PROILING_CONTROL_SET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64)));
+               err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg);
                break;
-
 #else
 
-       case MALI_IOC_PROFILING_START:              /* FALL-THROUGH */
        case MALI_IOC_PROFILING_ADD_EVENT:          /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_STOP:               /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_GET_EVENT:          /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_CLEAR:              /* FALL-THROUGH */
-       case MALI_IOC_PROFILING_GET_CONFIG:         /* FALL-THROUGH */
        case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
                MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
                err = -ENOTTY;
                break;
-
 #endif
 
-       case MALI_IOC_MEM_WRITE_SAFE:
-               err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+       case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
+               err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_MAP_EXT:
-               err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+       case MALI_IOC_MEM_ALLOC:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64)));
+               err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_UNMAP_EXT:
-               err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+       case MALI_IOC_MEM_FREE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_free_mem_s), sizeof(u64)));
+               err = mem_free_wrapper(session_data, (_mali_uk_free_mem_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
-               err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+       case MALI_IOC_MEM_BIND:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_bind_mem_s), sizeof(u64)));
+               err = mem_bind_wrapper(session_data, (_mali_uk_bind_mem_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
-               err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+       case MALI_IOC_MEM_UNBIND:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unbind_mem_s), sizeof(u64)));
+               err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg);
                break;
 
-#if defined(CONFIG_MALI400_UMP)
-
-       case MALI_IOC_MEM_ATTACH_UMP:
-               err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+       case MALI_IOC_MEM_COW:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64)));
+               err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_RELEASE_UMP:
-               err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+       case MALI_IOC_MEM_COW_MODIFY_RANGE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64)));
+               err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg);
                break;
 
-#else
-
-       case MALI_IOC_MEM_ATTACH_UMP:
-       case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
-               MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
-               err = -ENOTTY;
+       case MALI_IOC_MEM_WRITE_SAFE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
+               err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
                break;
-#endif
 
-#ifdef CONFIG_DMA_SHARED_BUFFER
-       case MALI_IOC_MEM_ATTACH_DMA_BUF:
-               err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
+       case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
+               err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_RELEASE_DMA_BUF:
-               err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
+       case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
+               err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
                break;
 
        case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+#ifdef CONFIG_DMA_SHARED_BUFFER
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
                err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
-               break;
 #else
-
-       case MALI_IOC_MEM_ATTACH_DMA_BUF:   /* FALL-THROUGH */
-       case MALI_IOC_MEM_RELEASE_DMA_BUF:  /* FALL-THROUGH */
-       case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
                MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
                err = -ENOTTY;
-               break;
 #endif
+               break;
 
        case MALI_IOC_PP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
                err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_PP_AND_GP_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
                err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
                err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
                break;
 
        case MALI_IOC_PP_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
                err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
                break;
 
        case MALI_IOC_PP_DISABLE_WB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
                err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_START_JOB:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
                err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
                err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_CORE_VERSION_GET:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
                err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
                break;
 
        case MALI_IOC_GP2_SUSPEND_RESPONSE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
                err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
                break;
 
        case MALI_IOC_VSYNC_EVENT_REPORT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
                err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
                break;
 
        case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
                err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
                break;
        case MALI_IOC_TIMELINE_WAIT:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
                err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
                break;
        case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
                err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
                break;
        case MALI_IOC_SOFT_JOB_START:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
                err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
                break;
        case MALI_IOC_SOFT_JOB_SIGNAL:
+               BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
                err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
                break;
 
-       case MALI_IOC_MEM_INIT: /* Fallthrough */
-       case MALI_IOC_MEM_TERM: /* Fallthrough */
-               MALI_DEBUG_PRINT(2, ("Deprecated ioctls called\n"));
-               err = -ENOTTY;
-               break;
-
-       case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
-       case MALI_IOC_MEM_FREE_BIG_BLOCK:
-               MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
-               err = -ENOTTY;
-               break;
-
        default:
                MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
                err = -ENOTTY;
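
Every ioctl case now opens with BUILD_BUG_ON(!IS_ALIGNED(sizeof(...),
sizeof(u64))). IS_ALIGNED(x, a) expands to ((x) & ((a) - 1)) == 0, so the
build fails unless each argument struct's size is a multiple of 8 bytes,
keeping the user/kernel ABI identical for 32-bit user space on a 64-bit
kernel. A standalone sketch with a made-up struct:

        #include <linux/bug.h>
        #include <linux/kernel.h>
        #include <linux/types.h>

        /* _example_args_s is invented for illustration; the real structs
         * live in the _mali_uk_* headers. */
        typedef struct {
                u64 ctx;     /* 8 bytes */
                u32 value;
                u32 padding; /* pads sizeof() to a multiple of sizeof(u64) */
        } _example_args_s;

        static void example_abi_check(void)
        {
                /* Fails at compile time if the size is not 64-bit aligned. */
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(_example_args_s), sizeof(u64)));
        }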
index 149e4353f0e1773eecb28e9a52d75cdb7ae458cf..b88b24893c546da009fb67fe2ff3636234bce9a6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -16,16 +16,13 @@ extern "C" {
 #endif
 
 #include <linux/cdev.h>     /* character device definitions */
+#include <linux/idr.h>
+#include <linux/rbtree.h>
 #include "mali_kernel_license.h"
 #include "mali_osk_types.h"
 
 extern struct platform_device *mali_platform_device;
 
-#if MALI_LICENSE_IS_GPL
-/* Defined in mali_osk_irq.h */
-extern struct workqueue_struct * mali_wq_normal;
-#endif
-
 #ifdef __cplusplus
 }
 #endif
index 5bbbad3c9865c1c927c44a56c7e0b4c402a3cbf1..5cdbe492429e0ba5e7744faa3541392391fc062a 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -29,7 +29,6 @@
 #include <asm/uaccess.h>
 #include <linux/module.h>
 #include <linux/mali/mali_utgard.h>
-#include <linux/proc_fs.h>
 #include "mali_kernel_sysfs.h"
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
 #include <linux/slab.h>
@@ -49,7 +48,7 @@
 #include "mali_profiling_internal.h"
 #include "mali_gp_job.h"
 #include "mali_pp_job.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
 
 #define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
 #define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
@@ -71,7 +70,7 @@ typedef enum {
        _MALI_MAX_EVENTS
 } _mali_device_debug_power_events;
 
-static const char* const mali_power_events[_MALI_MAX_EVENTS] = {
+static const char *const mali_power_events[_MALI_MAX_EVENTS] = {
        [_MALI_DEVICE_SUSPEND] = "suspend",
        [_MALI_DEVICE_RESUME] = "resume",
        [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
@@ -80,82 +79,6 @@ static const char* const mali_power_events[_MALI_MAX_EVENTS] = {
 
 static mali_bool power_always_on_enabled = MALI_FALSE;
 
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *mali_pentry;
-
-static int proc_memoryusage_show(struct seq_file *m, void *v)
-{
-    seq_printf(m, "%u\n", _mali_ukk_report_memory_usage());
-
-    return 0;
-}
-
-static int proc_memoryusage_open(struct inode *inode, struct file *file)
-{
-    return single_open(file, proc_memoryusage_show, NULL);
-}
-
-static const struct file_operations proc_memoryusage_operations = {
-    .open    = proc_memoryusage_open,
-    .read    = seq_read,
-    .llseek  = seq_lseek,
-    .release = single_release,
-};
-
-static int proc_utilization_show(struct seq_file *m, void *v)
-{
-    unsigned long gpu, gp, pp;
-    
-    gpu = ((_mali_ukk_utilization_gp_pp()*100)/256);
-    gp = ((_mali_ukk_utilization_gp()*100)/256);
-    pp = ((_mali_ukk_utilization_pp()*100)/256);
-
-    seq_printf(m, "gpu/gp/pp=%lu/%lu/%lu\n", gpu, gp, pp);
-
-    return 0;
-}
-
-static int proc_utilization_open(struct inode *inode, struct file *file)
-{
-    return single_open(file, proc_utilization_show, NULL);
-}
-
-static const struct file_operations proc_utilization_operations = {
-    .open    = proc_utilization_open,
-    .read    = seq_read,
-    .llseek  = seq_lseek,
-    .release = single_release,
-};
-
-static void proc_mali_register(void)
-{
-    struct proc_dir_entry *mt_elsuspend_entry = NULL;
-    mali_pentry = proc_mkdir("mali", NULL);
-    MALI_DEBUG_PRINT(1, ("[%s] pentry=%p\n", __FUNCTION__, mali_pentry));
-
-    if (!mali_pentry)
-        return;
-     
-    proc_create("memory_usage", 0, mali_pentry, &proc_memoryusage_operations);
-    proc_create("utilization", 0, mali_pentry, &proc_utilization_operations);
-}
-
-
-static void proc_mali_unregister(void)
-{
-    if (!mali_pentry)
-        return;
-
-    remove_proc_entry("memory_usage", mali_pentry);
-    remove_proc_entry("utilization", mali_pentry);
-    remove_proc_entry("mali", NULL);
-    mali_pentry = NULL;
-}
-#else
-#define proc_mali_register() do{}while(0)
-#define proc_mali_unregister() do{}while(0)
-#endif
-
 static int open_copy_private_data(struct inode *inode, struct file *filp)
 {
        filp->private_data = inode->i_private;
@@ -171,7 +94,8 @@ static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t co
        group = (struct mali_group *)filp->private_data;
        MALI_DEBUG_ASSERT_POINTER(group);
 
-       r = sprintf(buffer, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+       r = snprintf(buffer, 64, "%u\n",
+                    mali_executor_group_is_disabled(group) ? 0 : 1);
 
        return simple_read_from_buffer(buf, count, offp, buffer, r);
 }
@@ -195,17 +119,17 @@ static ssize_t group_enabled_write(struct file *filp, const char __user *buf, si
        }
        buffer[count] = '\0';
 
-       r = strict_strtoul(&buffer[0], 10, &val);
+       r = kstrtoul(&buffer[0], 10, &val);
        if (0 != r) {
                return -EINVAL;
        }
 
        switch (val) {
        case 1:
-               mali_group_enable(group);
+               mali_executor_group_enable(group);
                break;
        case 0:
-               mali_group_disable(group);
+               mali_executor_group_disable(group);
                break;
        default:
                return -EINVAL;
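
strict_strtoul() was removed from the kernel, and all of these write handlers
move to kstrtoul(), which has the same contract: 0 on success, a negative
errno otherwise, with the parsed value stored through the last argument. The
pattern, reduced to a sketch:

        /* buffer must be a NUL-terminated decimal string. */
        static int example_parse(const char *buffer, unsigned long *out)
        {
                int ret = kstrtoul(buffer, 10, out); /* base 10 */
                if (ret)
                        return ret; /* typically -EINVAL or -ERANGE */
                return 0;
        }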
@@ -232,7 +156,7 @@ static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_
        hw_core = (struct mali_hw_core *)filp->private_data;
        MALI_DEBUG_ASSERT_POINTER(hw_core);
 
-       r = sprintf(buffer, "0x%08X\n", hw_core->phys_addr);
+       r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
 
        return simple_read_from_buffer(buf, count, offp, buffer, r);
 }
@@ -245,10 +169,10 @@ static const struct file_operations hw_core_base_addr_fops = {
 
 static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
-       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
-       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
-       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
        char buf[64];
        int r;
        u32 val;
@@ -280,9 +204,9 @@ static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf,
        }
 
        if (MALI_HW_CORE_NO_COUNTER == val) {
-               r = sprintf(buf, "-1\n");
+               r = snprintf(buf, 64, "-1\n");
        } else {
-               r = sprintf(buf, "%u\n", val);
+               r = snprintf(buf, 64, "%u\n", val);
        }
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
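
The casts change from u32 to uintptr_t because filp->private_data does not
hold a real pointer here: the debugfs setup packs the counter description
straight into the pointer value (bit 24 is the PP flag, per the
PRIVATE_DATA_COUNTER_MAKE_GP/PP macros earlier in this file). Going through
uintptr_t keeps that round-trip safe on 64-bit kernels. A sketch of the
round-trip, with the unpacking masks shown only for illustration:

        static void example_cookie_roundtrip(void)
        {
                /* Pack as PRIVATE_DATA_COUNTER_MAKE_PP(5) would. */
                void *cookie = (void *)(uintptr_t)((1 << 24) | 5);
                u32 is_pp = (u32)(((uintptr_t)cookie >> 24) & 1);
                u32 src_id = (u32)((uintptr_t)cookie & 0xFFFF);

                (void)is_pp;  /* 1: a PP counter */
                (void)src_id; /* 5: the counter source id */
        }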
@@ -290,10 +214,10 @@ static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf,
 
 static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((u32)filp->private_data);
-       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((u32)filp->private_data);
-       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((u32)filp->private_data);
-       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((u32)filp->private_data);
+       u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+       u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+       mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+       u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
        char buf[64];
        long val;
        int ret;
@@ -308,7 +232,7 @@ static ssize_t profiling_counter_src_write(struct file *filp, const char __user
 
        buf[cnt] = 0;
 
-       ret = strict_strtol(buf, 10, &val);
+       ret = kstrtol(buf, 10, &val);
        if (ret < 0) {
                return ret;
        }
@@ -369,9 +293,9 @@ static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, si
        }
 
        if (MALI_HW_CORE_NO_COUNTER == val) {
-               r = sprintf(buf, "-1\n");
+               r = snprintf(buf, 64, "-1\n");
        } else {
-               r = sprintf(buf, "%u\n", val);
+               r = snprintf(buf, 64, "%u\n", val);
        }
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -393,7 +317,7 @@ static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *u
 
        buf[cnt] = 0;
 
-       ret = strict_strtol(buf, 10, &val);
+       ret = kstrtol(buf, 10, &val);
        if (ret < 0) {
                return ret;
        }
@@ -403,11 +327,7 @@ static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *u
                val = MALI_HW_CORE_NO_COUNTER;
        }
 
-       if (0 == src_id) {
-               mali_l2_cache_core_set_counter_src0(l2_core, (u32)val);
-       } else {
-               mali_l2_cache_core_set_counter_src1(l2_core, (u32)val);
-       }
+       mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
 
        *ppos += cnt;
        return cnt;
@@ -431,7 +351,7 @@ static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *u
 
        buf[cnt] = 0;
 
-       ret = strict_strtol(buf, 10, &val);
+       ret = kstrtol(buf, 10, &val);
        if (ret < 0) {
                return ret;
        }
@@ -444,11 +364,7 @@ static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *u
        l2_id = 0;
        l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
        while (NULL != l2_cache) {
-               if (0 == src_id) {
-                       mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val);
-               } else {
-                       mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val);
-               }
+               mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
 
                /* try next L2 */
                l2_id++;
@@ -513,6 +429,56 @@ static const struct file_operations l2_all_counter_src1_fops = {
        .write = l2_all_counter_src1_write,
 };
 
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+       char buf[64];
+       int r;
+       u32 src0 = 0;
+       u32 val0 = 0;
+       u32 src1 = 0;
+       u32 val1 = 0;
+       u32 val = -1;
+       struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+       mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+       if (0 == src_id) {
+               if (MALI_HW_CORE_NO_COUNTER != val0) {
+                       val = val0;
+               }
+       } else {
+               if (MALI_HW_CORE_NO_COUNTER != val1) {
+                       val = val1;
+               }
+       }
+
+       r = snprintf(buf, 64, "%u\n", val);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+       .owner = THIS_MODULE,
+       .open  = open_copy_private_data,
+       .read  = l2_l2x_counter_val1_read,
+};
+
 static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 {
        unsigned long val;
@@ -525,7 +491,7 @@ static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf,
        }
        buf[cnt] = '\0';
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (0 != ret) {
                return ret;
        }
@@ -533,10 +499,10 @@ static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf,
        /* Update setting (not exactly thread safe) */
        if (1 == val && MALI_FALSE == power_always_on_enabled) {
                power_always_on_enabled = MALI_TRUE;
-               _mali_osk_pm_dev_ref_add();
+               _mali_osk_pm_dev_ref_get_sync();
        } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
                power_always_on_enabled = MALI_FALSE;
-               _mali_osk_pm_dev_ref_dec();
+               _mali_osk_pm_dev_ref_put();
        }
 
        *ppos += cnt;
@@ -560,15 +526,13 @@ static const struct file_operations power_always_on_fops = {
 
 static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-
-       if (!strncmp(ubuf,mali_power_events[_MALI_DEVICE_SUSPEND],strlen(mali_power_events[_MALI_DEVICE_SUSPEND]))) {
-               mali_pm_os_suspend();
-
-       } else if (!strncmp(ubuf,mali_power_events[_MALI_DEVICE_RESUME],strlen(mali_power_events[_MALI_DEVICE_RESUME]))) {
+       if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]) - 1)) {
+               mali_pm_os_suspend(MALI_TRUE);
+       } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]) - 1)) {
                mali_pm_os_resume();
-       } else if (!strncmp(ubuf,mali_power_events[_MALI_DEVICE_DVFS_PAUSE],strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]))) {
+       } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]) - 1)) {
                mali_dev_pause();
-       } else if (!strncmp(ubuf,mali_power_events[_MALI_DEVICE_DVFS_RESUME],strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]))) {
+       } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]) - 1)) {
                mali_dev_resume();
        }
        *ppos += cnt;
@@ -596,13 +560,13 @@ static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
 
        size = seq_get_buf(seq_file, &buf);
 
-       if(!size) {
+       if (!size) {
                return -ENOMEM;
        }
 
        /* Create the internal state dump. */
-       len  = snprintf(buf+len, size-len, "Mali device driver %s\n", SVN_REV_STRING);
-       len += snprintf(buf+len, size-len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+       len  = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+       len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
 
        len += _mali_kernel_core_dump_state(buf + len, size - len);
 
@@ -631,7 +595,7 @@ static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_
        char buf[64];
        int r;
 
-       r = sprintf(buf, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+       r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -651,7 +615,7 @@ static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf
 
        buf[cnt] = 0;
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (ret < 0) {
                return ret;
        }
@@ -750,7 +714,7 @@ static int profiling_events_show(struct seq_file *seq_file, void *v)
        u32 event_id;
        u32 data[5];
 
-       index = (u32)*spos;
+       index = (u32) * spos;
 
        /* Retrieve all events */
        if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
@@ -772,7 +736,7 @@ static int profiling_events_show_human_readable(struct seq_file *seq_file, void
        u32 event_id;
        u32 data[5];
 
-       index = (u32)*spos;
+       index = (u32) * spos;
 
        /* Retrieve all events */
        if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
@@ -784,7 +748,7 @@ static int profiling_events_show_human_readable(struct seq_file *seq_file, void
 
                seq_printf(seq_file, "[%06u] ", index);
 
-               switch(event_id & 0x0F000000) {
+               switch (event_id & 0x0F000000) {
                case MALI_PROFILING_EVENT_TYPE_SINGLE:
                        seq_printf(seq_file, "SINGLE | ");
                        break;
@@ -805,7 +769,7 @@ static int profiling_events_show_human_readable(struct seq_file *seq_file, void
                        break;
                }
 
-               switch(event_id & 0x00FF0000) {
+               switch (event_id & 0x00FF0000) {
                case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
                        seq_printf(seq_file, "SW | ");
                        break;
@@ -846,7 +810,7 @@ static int profiling_events_show_human_readable(struct seq_file *seq_file, void
 
                if (MALI_EVENT_ID_IS_HW(event_id)) {
                        if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
-                               switch(event_id & 0x0000FFFF) {
+                               switch (event_id & 0x0000FFFF) {
                                case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
                                        seq_printf(seq_file, "PHYSICAL | ");
                                        break;
@@ -914,26 +878,34 @@ static const struct file_operations profiling_events_human_readable_fops = {
 
 #endif
 
-static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
 {
-       char buf[64];
-       size_t r;
-       u32 mem = _mali_ukk_report_memory_usage();
+       seq_printf(s, "  %-25s  %-10s  %-10s  %-15s  %-15s  %-10s  %-10s\n"\
+                  "==============================================================================================================\n",
+                  "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+                  "external_mem", "ump_mem", "dma_mem");
+       mali_session_memory_tracking(s);
+       return 0;
+}
 
-       r = snprintf(buf, 64, "%u\n", mem);
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, memory_debugfs_show, inode->i_private);
 }
 
 static const struct file_operations memory_usage_fops = {
        .owner = THIS_MODULE,
-       .read = memory_used_read,
+       .open = memory_debugfs_open,
+       .read  = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
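
The memory_usage file moves from a hand-rolled read handler to the standard
seq_file single-shot pattern: single_open() binds a show() callback, and
seq_read/seq_lseek/single_release do the rest. The same three-piece skeleton,
reduced to a minimal sketch (names hypothetical):

        #include <linux/fs.h>
        #include <linux/module.h>
        #include <linux/seq_file.h>

        static int example_show(struct seq_file *s, void *unused)
        {
                seq_printf(s, "example\n"); /* all output goes via seq_file */
                return 0;
        }

        static int example_open(struct inode *inode, struct file *file)
        {
                return single_open(file, example_show, inode->i_private);
        }

        static const struct file_operations example_fops = {
                .owner   = THIS_MODULE,
                .open    = example_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };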
 
 static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
        char buf[64];
        size_t r;
-       u32 uval= _mali_ukk_utilization_gp_pp();
+       u32 uval = _mali_ukk_utilization_gp_pp();
 
        r = snprintf(buf, 64, "%u\n", uval);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -943,7 +915,7 @@ static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t
 {
        char buf[64];
        size_t r;
-       u32 uval= _mali_ukk_utilization_gp();
+       u32 uval = _mali_ukk_utilization_gp();
 
        r = snprintf(buf, 64, "%u\n", uval);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -953,7 +925,7 @@ static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t
 {
        char buf[64];
        size_t r;
-       u32 uval= _mali_ukk_utilization_pp();
+       u32 uval = _mali_ukk_utilization_pp();
 
        r = snprintf(buf, 64, "%u\n", uval);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -988,7 +960,7 @@ static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, s
        }
        buf[cnt] = '\0';
 
-       ret = strict_strtoul(buf, 10, &val);
+       ret = kstrtoul(buf, 10, &val);
        if (0 != ret) {
                return ret;
        }
@@ -1027,93 +999,17 @@ static int mali_sysfs_user_settings_register(void)
        struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
 
        if (mali_user_settings_dir != NULL) {
-               int i;
+               long i;
                for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
-                       debugfs_create_file(_mali_uk_user_setting_descriptions[i], 0600, mali_user_settings_dir, (void*)i, &user_settings_fops);
+                       debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+                                           0600, mali_user_settings_dir, (void *)i,
+                                           &user_settings_fops);
                }
        }
 
        return 0;
 }
 
-static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
-       int ret;
-       char buffer[32];
-       unsigned long val;
-       struct mali_pmu_core *pmu;
-       _mali_osk_errcode_t err;
-
-       if (count >= sizeof(buffer)) {
-               return -ENOMEM;
-       }
-
-       if (copy_from_user(&buffer[0], buf, count)) {
-               return -EFAULT;
-       }
-       buffer[count] = '\0';
-
-       ret = strict_strtoul(&buffer[0], 10, &val);
-       if (0 != ret) {
-               return -EINVAL;
-       }
-
-       pmu = mali_pmu_get_global_pmu_core();
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-
-       err = mali_pmu_power_down(pmu, val);
-       if (_MALI_OSK_ERR_OK != err) {
-               return -EINVAL;
-       }
-
-       *offp += count;
-       return count;
-}
-
-static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
-       int ret;
-       char buffer[32];
-       unsigned long val;
-       struct mali_pmu_core *pmu;
-       _mali_osk_errcode_t err;
-
-       if (count >= sizeof(buffer)) {
-               return -ENOMEM;
-       }
-
-       if (copy_from_user(&buffer[0], buf, count)) {
-               return -EFAULT;
-       }
-       buffer[count] = '\0';
-
-       ret = strict_strtoul(&buffer[0], 10, &val);
-       if (0 != ret) {
-               return -EINVAL;
-       }
-
-       pmu = mali_pmu_get_global_pmu_core();
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-
-       err = mali_pmu_power_up(pmu, val);
-       if (_MALI_OSK_ERR_OK != err) {
-               return -EINVAL;
-       }
-
-       *offp += count;
-       return count;
-}
-
-static const struct file_operations pmu_power_down_fops = {
-       .owner = THIS_MODULE,
-       .write = pmu_power_down_write,
-};
-
-static const struct file_operations pmu_power_up_fops = {
-       .owner = THIS_MODULE,
-       .write = pmu_power_up_write,
-};
-
 static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
 {
        int ret;
@@ -1129,12 +1025,12 @@ static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *
        }
        buffer[count] = '\0';
 
-       ret = strict_strtoul(&buffer[0], 10, &val);
+       ret = kstrtoul(&buffer[0], 10, &val);
        if (0 != ret) {
                return -EINVAL;
        }
 
-       ret = mali_pp_scheduler_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+       ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
        if (ret) {
                return ret;
        }
@@ -1148,7 +1044,7 @@ static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, si
        int r;
        char buffer[64];
 
-       r = sprintf(buffer, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+       r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
 
        return simple_read_from_buffer(buf, count, offp, buffer, r);
 }
@@ -1165,7 +1061,7 @@ static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size
        int r;
        char buffer[64];
 
-       r = sprintf(buffer, "%u\n", mali_pp_scheduler_get_num_cores_total());
+       r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
 
        return simple_read_from_buffer(buf, count, offp, buffer, r);
 }
@@ -1190,17 +1086,17 @@ static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __use
        }
        buffer[count] = '\0';
 
-       ret = strict_strtoul(&buffer[0], 10, &val);
+       ret = kstrtoul(&buffer[0], 10, &val);
        if (0 != ret) {
                return -EINVAL;
        }
 
        switch (val) {
        case 1:
-               mali_pp_scheduler_core_scaling_enable();
+               mali_executor_core_scaling_enable();
                break;
        case 0:
-               mali_pp_scheduler_core_scaling_disable();
+               mali_executor_core_scaling_disable();
                break;
        default:
                return -EINVAL;
@@ -1213,7 +1109,7 @@ static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __use
 
 static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
 {
-       return simple_read_from_buffer(buf, count, offp, mali_pp_scheduler_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+       return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
 }
 static const struct file_operations pp_core_scaling_enabled_fops = {
        .owner = THIS_MODULE,
@@ -1229,16 +1125,19 @@ static ssize_t version_read(struct file *filp, char __user *buf, size_t count, l
 
        switch (mali_kernel_core_get_product_id()) {
        case _MALI_PRODUCT_ID_MALI200:
-               r = sprintf(buffer, "Mali-200\n");
+               r = snprintf(buffer, 64, "Mali-200\n");
                break;
        case _MALI_PRODUCT_ID_MALI300:
-               r = sprintf(buffer, "Mali-300\n");
+               r = snprintf(buffer, 64, "Mali-300\n");
                break;
        case _MALI_PRODUCT_ID_MALI400:
-               r = sprintf(buffer, "Mali-400 MP\n");
+               r = snprintf(buffer, 64, "Mali-400 MP\n");
                break;
        case _MALI_PRODUCT_ID_MALI450:
-               r = sprintf(buffer, "Mali-450 MP\n");
+               r = snprintf(buffer, 64, "Mali-450 MP\n");
+               break;
+       case _MALI_PRODUCT_ID_MALI470:
+               r = snprintf(buffer, 64, "Mali-470 MP\n");
                break;
        case _MALI_PRODUCT_ID_UNKNOWN:
                return -EINVAL;
@@ -1253,21 +1152,48 @@ static const struct file_operations version_fops = {
        .read = version_read,
 };
 
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+       struct mali_session_data *session, *tmp;
+       u32 session_seq = 1;
+
+       seq_printf(s, "timeline system info: \n=================\n\n");
+
+       mali_session_lock();
+       MALI_SESSION_FOREACH(session, tmp, link) {
+               seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+               mali_timeline_debug_print_system(session->timeline_system, s);
+               seq_printf(s, "session %d end\n\n\n", session_seq++);
+       }
+       mali_session_unlock();
+
+       return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+       .owner = THIS_MODULE,
+       .open = timeline_debugfs_open,
+       .read  = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release
+};
+#endif
+
 int mali_sysfs_register(const char *mali_dev_name)
 {
        mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
-       if(ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+       if (ERR_PTR(-ENODEV) == mali_debugfs_dir) {
                /* Debugfs not supported. */
                mali_debugfs_dir = NULL;
        } else {
-      /* {MTK add
-       * Add procfs 
-       * }*/
-      proc_mali_register();
-
-               if(NULL != mali_debugfs_dir) {
+               if (NULL != mali_debugfs_dir) {
                        /* Debugfs directory created successfully; create files now */
-                       struct dentry *mali_pmu_dir;
                        struct dentry *mali_power_dir;
                        struct dentry *mali_gp_dir;
                        struct dentry *mali_pp_dir;
@@ -1276,12 +1202,6 @@ int mali_sysfs_register(const char *mali_dev_name)
 
                        debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
 
-                       mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
-                       if (NULL != mali_pmu_dir) {
-                               debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
-                               debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
-                       }
-
                        mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
                        if (mali_power_dir != NULL) {
                                debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
@@ -1291,7 +1211,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
                        if (mali_gp_dir != NULL) {
                                u32 num_groups;
-                               int i;
+                               long i;
 
                                num_groups = mali_group_get_glob_num_groups();
                                for (i = 0; i < num_groups; i++) {
@@ -1314,7 +1234,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
                        if (mali_pp_dir != NULL) {
                                u32 num_groups;
-                               int i;
+                               long i;
 
                                debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
                                debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
@@ -1362,6 +1282,8 @@ int mali_sysfs_register(const char *mali_dev_name)
                                        if (NULL != mali_l2_l2x_dir) {
                                                debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
                                                debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+                                               debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+                                               debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
                                                debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
                                        }
 
@@ -1371,7 +1293,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                                }
                        }
 
-                       debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+                       debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
 
                        debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
                        debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
@@ -1380,7 +1302,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                        mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
                        if (mali_profiling_dir != NULL) {
                                u32 max_sub_jobs;
-                               int i;
+                               long i;
                                struct dentry *mali_profiling_gp_dir;
                                struct dentry *mali_profiling_pp_dir;
 #if defined(CONFIG_MALI400_INTERNAL_PROFILING)
@@ -1391,8 +1313,8 @@ int mali_sysfs_register(const char *mali_dev_name)
                                 */
                                mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
                                if (mali_profiling_gp_dir != NULL) {
-                                       debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
-                                       debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
                                }
 
                                /*
@@ -1402,19 +1324,25 @@ int mali_sysfs_register(const char *mali_dev_name)
                                 */
                                mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
                                if (mali_profiling_pp_dir != NULL) {
-                                       debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
-                                       debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+                                       debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
                                }
 
-                               max_sub_jobs = mali_pp_scheduler_get_num_cores_total();
+                               max_sub_jobs = mali_executor_get_num_cores_total();
                                for (i = 0; i < max_sub_jobs; i++) {
                                        char buf[16];
                                        struct dentry *mali_profiling_pp_x_dir;
                                        _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
                                        mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
                                        if (NULL != mali_profiling_pp_x_dir) {
-                                               debugfs_create_file("counter_src0", 0600, mali_profiling_pp_x_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), &profiling_counter_src_fops);
-                                               debugfs_create_file("counter_src1", 0600, mali_profiling_pp_x_dir, (void*)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src0",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+                                                                   &profiling_counter_src_fops);
+                                               debugfs_create_file("counter_src1",
+                                                                   0600, mali_profiling_pp_x_dir,
+                                                                   (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+                                                                   &profiling_counter_src_fops);
                                        }
                                }
 
@@ -1423,7 +1351,7 @@ int mali_sysfs_register(const char *mali_dev_name)
                                if (mali_profiling_proc_dir != NULL) {
                                        struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
                                        if (mali_profiling_proc_default_dir != NULL) {
-                                               debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void*)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+                                               debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
                                        }
                                }
                                debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
@@ -1436,6 +1364,9 @@ int mali_sysfs_register(const char *mali_dev_name)
                        debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
 #endif
 
+#if defined(DEBUG)
+                       debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
                        if (mali_sysfs_user_settings_register()) {
                                /* Failed to create the debugfs entries for the user settings DB. */
                                MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
@@ -1449,12 +1380,7 @@ int mali_sysfs_register(const char *mali_dev_name)
 
 int mali_sysfs_unregister(void)
 {
-   /* {MTK add
-    * Remove procfs
-    * }*/
-   proc_mali_unregister();
-
-       if(NULL != mali_debugfs_dir) {
+       if (NULL != mali_debugfs_dir) {
                debugfs_remove_recursive(mali_debugfs_dir);
        }
        return 0;
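
For reference, the renamed read-only "gpu_memory" entry above is consumed like any
other debugfs file. A minimal userspace sketch, assuming debugfs is mounted at
/sys/kernel/debug and the directory is named "mali" (the real name comes from the
mali_dev_name argument to mali_sysfs_register()):

#include <stdio.h>

int main(void)
{
	char buf[128];
	/* Path is an assumption based on mali_dev_name; adjust as needed */
	FILE *f = fopen("/sys/kernel/debug/mali/gpu_memory", "r");

	if (NULL == f)
		return 1;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}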
index ab35087deed9a71b4f26ad147e18bc54c71b9d40..5d1b8deee2b901e1fc891ce57456a9e6cbc5500c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index fcd7966de52063c33f07e8514e4dc1f3523bd08a..a34d2f2ae6df1574fd2a4d85bbb72b49e0b33d19 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  */
 TRACE_EVENT(mali_timeline_event,
 
-            TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
-                     unsigned int d2, unsigned int d3, unsigned int d4),
+           TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+                    unsigned int d2, unsigned int d3, unsigned int d4),
 
-            TP_ARGS(event_id, d0, d1, d2, d3, d4),
+           TP_ARGS(event_id, d0, d1, d2, d3, d4),
 
-            TP_STRUCT__entry(
-                __field(unsigned int, event_id)
-                __field(unsigned int, d0)
-                __field(unsigned int, d1)
-                __field(unsigned int, d2)
-                __field(unsigned int, d3)
-                __field(unsigned int, d4)
-            ),
+           TP_STRUCT__entry(
+                   __field(unsigned int, event_id)
+                   __field(unsigned int, d0)
+                   __field(unsigned int, d1)
+                   __field(unsigned int, d2)
+                   __field(unsigned int, d3)
+                   __field(unsigned int, d4)
+           ),
 
-            TP_fast_assign(
-                __entry->event_id = event_id;
-                __entry->d0 = d0;
-                __entry->d1 = d1;
-                __entry->d2 = d2;
-                __entry->d3 = d3;
-                __entry->d4 = d4;
-            ),
+           TP_fast_assign(
+                   __entry->event_id = event_id;
+                   __entry->d0 = d0;
+                   __entry->d1 = d1;
+                   __entry->d2 = d2;
+                   __entry->d3 = d3;
+                   __entry->d4 = d4;
+           ),
 
-            TP_printk("event=%d", __entry->event_id)
-           );
+           TP_printk("event=%d", __entry->event_id)
+          );
 
 /**
 * Define a tracepoint used to register the value of a hardware counter.
@@ -75,21 +75,21 @@ TRACE_EVENT(mali_timeline_event,
  */
 TRACE_EVENT(mali_hw_counter,
 
-            TP_PROTO(unsigned int counter_id, unsigned int value),
+           TP_PROTO(unsigned int counter_id, unsigned int value),
 
-            TP_ARGS(counter_id, value),
+           TP_ARGS(counter_id, value),
 
-            TP_STRUCT__entry(
-                __field(unsigned int, counter_id)
-                __field(unsigned int, value)
-            ),
+           TP_STRUCT__entry(
+                   __field(unsigned int, counter_id)
+                   __field(unsigned int, value)
+           ),
 
-            TP_fast_assign(
-                __entry->counter_id = counter_id;
-            ),
+           TP_fast_assign(
+                   __entry->counter_id = counter_id;
+           ),
 
-            TP_printk("event %d = %d", __entry->counter_id, __entry->value)
-           );
+           TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+          );
 
 /**
  * Define a tracepoint used to send a bundle of software counters.
@@ -98,26 +98,62 @@ TRACE_EVENT(mali_hw_counter,
  */
 TRACE_EVENT(mali_sw_counters,
 
-            TP_PROTO(pid_t pid, pid_t tid, void * surface_id, unsigned int * counters),
+           TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
 
-            TP_ARGS(pid, tid, surface_id, counters),
+           TP_ARGS(pid, tid, surface_id, counters),
 
-            TP_STRUCT__entry(
-                __field(pid_t, pid)
-                __field(pid_t, tid)
-                __field(void *, surface_id)
-                __field(unsigned int *, counters)
-            ),
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(pid_t, tid)
+                   __field(void *, surface_id)
+                   __field(unsigned int *, counters)
+           ),
 
-            TP_fast_assign(
-                __entry->pid = pid;
-                __entry->tid = tid;
-                __entry->surface_id = surface_id;
-                __entry->counters = counters;
-            ),
+           TP_fast_assign(
+                   __entry->pid = pid;
+                   __entry->tid = tid;
+                   __entry->surface_id = surface_id;
+                   __entry->counters = counters;
+           ),
 
-            TP_printk("counters were %s", __entry->counters == NULL? "NULL" : "not NULL")
-           );
+           TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+          );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id from which the core activity originates
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+           TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+           TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(unsigned int, active)
+                   __field(unsigned int, core_type)
+                   __field(unsigned int, core_id)
+                   __field(unsigned int, frame_builder_id)
+                   __field(unsigned int, flush_id)
+           ),
+
+           TP_fast_assign(
+                   __entry->pid = pid;
+                   __entry->active = active;
+                   __entry->core_type = core_type;
+                   __entry->core_id = core_id;
+                   __entry->frame_builder_id = frame_builder_id;
+                   __entry->flush_id = flush_id;
+           ),
+
+           TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+          );
 
 #endif /* MALI_LINUX_TRACE_H */
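
The mali_core_active event added above follows the standard TRACE_EVENT() pattern,
so the build generates a trace_mali_core_active() emitter matching TP_PROTO. A
minimal sketch of a driver-side call; the helper function itself is illustrative
and not part of this import:

#include <linux/sched.h>
#include "mali_linux_trace.h"

/* Illustrative helper: emit a start ("S") event for a GP core */
static void example_trace_gp_active(unsigned int core_id,
				    unsigned int frame_builder_id,
				    unsigned int flush_id)
{
	/* active = 1 (start), core_type = 1 (GP), per the @param docs above */
	trace_mali_core_active(task_tgid_nr(current), 1, 1,
			       core_id, frame_builder_id, flush_id);
}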
 
index ffa03d5bbd3decbe541816a2c9760b16a1ca9a4a..ba36f9350736e3c7a6f1e4da8eff723019e68353 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <linux/slab.h>
 #include <linux/version.h>
 #include <linux/platform_device.h>
+#include <linux/idr.h>
 
 #include "mali_osk.h"
-#include "mali_osk_mali.h"
-#include "mali_kernel_linux.h"
-#include "mali_scheduler.h"
-#include "mali_kernel_descriptor_mapping.h"
+#include "mali_executor.h"
 
 #include "mali_memory.h"
-#include "mali_memory_dma_buf.h"
 #include "mali_memory_os_alloc.h"
 #include "mali_memory_block_alloc.h"
+#include "mali_memory_util.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_cow.h"
 
-/* session->memory_lock must be held when calling this function */
-static void mali_mem_release(mali_mem_allocation *descriptor)
-{
-       MALI_DEBUG_ASSERT_POINTER(descriptor);
-       MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
 
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
 
-       switch (descriptor->type) {
-       case MALI_MEM_OS:
-               mali_mem_os_release(descriptor);
-               break;
-       case MALI_MEM_DMA_BUF:
-#if defined(CONFIG_DMA_SHARED_BUFFER)
-               mali_mem_dma_buf_release(descriptor);
-#endif
-               break;
-       case MALI_MEM_UMP:
-#if defined(CONFIG_MALI400_UMP)
-               mali_mem_ump_release(descriptor);
-#endif
-               break;
-       case MALI_MEM_EXTERNAL:
-               mali_mem_external_release(descriptor);
-               break;
-       case MALI_MEM_BLOCK:
-               mali_mem_block_release(descriptor);
-               break;
-       }
-}
+#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
 
-static void mali_mem_vma_open(struct vm_area_struct * vma)
+static void mali_mem_vma_open(struct vm_area_struct *vma)
 {
-       mali_mem_allocation *descriptor = (mali_mem_allocation*)vma->vm_private_data;
+       mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
        MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
-
-       descriptor->cpu_mapping.ref++;
-
+       alloc->cpu_mapping.vma = vma;
+       /* If the allocation needs to be shared, take a reference here */
+       mali_allocation_ref(alloc);
        return;
 }
-
 static void mali_mem_vma_close(struct vm_area_struct *vma)
 {
-       mali_mem_allocation *descriptor;
-       struct mali_session_data *session;
-       mali_mem_virt_cpu_mapping *mapping;
-
-       MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
-
-       descriptor = (mali_mem_allocation*)vma->vm_private_data;
-       BUG_ON(!descriptor);
-
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
-
-       mapping = &descriptor->cpu_mapping;
-       BUG_ON(0 == mapping->ref);
-
-       mapping->ref--;
-       if (0 != mapping->ref) {
-               MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", mapping->ref));
-               return;
-       }
-
-       session = descriptor->session;
-
-       mali_descriptor_mapping_free(session->descriptor_mapping, descriptor->id);
-
-       _mali_osk_mutex_wait(session->memory_lock);
-       mali_mem_release(descriptor);
-       _mali_osk_mutex_signal(session->memory_lock);
-
-       mali_mem_descriptor_destroy(descriptor);
+       /* If the allocation was shared, drop the reference here */
+       mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+       alloc->cpu_mapping.addr = 0;
+       alloc->cpu_mapping.vma = NULL;
+       mali_allocation_unref(&alloc);
+       vma->vm_private_data = NULL;
 }
 
-static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       void __user * address;
-       mali_mem_allocation *descriptor;
-
-       address = vmf->virtual_address;
-       descriptor = (mali_mem_allocation *)vma->vm_private_data;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
-
-       /*
-        * We always fail the call since all memory is pre-faulted when assigned to the process.
-        * Only the Mali cores can use page faults to extend buffers.
-       */
-
-       MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
-       MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
-
-       MALI_IGNORE(address);
-       MALI_IGNORE(descriptor);
+       mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+       mali_mem_backend *mem_bkend = NULL;
+       int ret;
+       int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;
+
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       MALI_DEBUG_ASSERT(alloc->backend_handle);
+       MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma == vma);
+       MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);
+
+       /* Get backend memory & Map on CPU */
+       mutex_lock(&mali_idr_mutex);
+       if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+               MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+               mutex_unlock(&mali_idr_mutex);
+               return VM_FAULT_SIGBUS;
+       }
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);
+
+       if (mem_bkend->type == MALI_MEM_COW) {
+               /* Check whether the page fault should perform the copy-on-write allocation */
+               if (mem_bkend->cow_flag & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
+                       MALI_DEBUG_PRINT(4, ("mali_vma_fault: COW allocate on demand, address=0x%lx\n", address));
+                       mutex_lock(&mem_bkend->mutex);
+                       ret = mali_mem_cow_allocate_on_demand(mem_bkend,
+                                                             (address - (unsigned long)alloc->cpu_mapping.addr) / PAGE_SIZE);
+                       mutex_unlock(&mem_bkend->mutex);
+                       if (ret != _MALI_OSK_ERR_OK) {
+                               return VM_FAULT_OOM;
+                       }
+                       prefetch_num = 1;
+               }
 
-       return VM_FAULT_SIGBUS;
+               /* Handle the CPU mapping of a COW-modified range: the mapping
+                * was zapped in cow_modify_range and the next CPU access
+                * triggers a page fault, so map the pages back to the CPU here. */
+               mutex_lock(&mem_bkend->mutex);
+               ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend,
+                                                       vma,
+                                                       address,
+                                                       prefetch_num);
+               mutex_unlock(&mem_bkend->mutex);
+
+               if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+                       return VM_FAULT_SIGBUS;
+               }
+       } else {
+               MALI_DEBUG_ASSERT(0);
+               /* Not supported yet */
+       }
+       return VM_FAULT_NOPAGE;
 }
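
The fault handler above relies on the usual zap-then-refault idiom: the COW path
zaps the CPU PTEs in cow_modify_range(), so the next CPU access faults into
mali_mem_vma_fault(), which allocates the page on demand and maps it back. A
minimal sketch of the zap side, assuming the standard zap_vma_ptes() helper does
the unmapping (the real call lives in the COW code, which is not part of this hunk):

#include <linux/mm.h>

/* Illustrative only: drop the CPU PTEs so the next access faults */
static void example_zap_cow_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long size)
{
	/* Requires a VM_PFNMAP vma, which mali_mmap() sets up */
	zap_vma_ptes(vma, start, size);
}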
 
-struct vm_operations_struct mali_kernel_vm_ops = {
+static struct vm_operations_struct mali_kernel_vm_ops = {
        .open = mali_mem_vma_open,
        .close = mali_mem_vma_close,
-       .fault = mali_kernel_memory_cpu_page_fault_handler
+       .fault = mali_mem_vma_fault,
 };
 
-/** @note munmap handler is done by vma close handler */
+
+/** @brief Map a Mali allocation to a CPU address
+ *
+ * Supported backend types:
+ * - MALI_MEM_OS
+ * - MALI_MEM_COW
+ * - MALI_MEM_BLOCK
+ *
+ * Not supported backend types:
+ * - _MALI_MEMORY_BIND_BACKEND_UMP
+ * - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+ * - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+ */
 int mali_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct mali_session_data *session;
-       mali_mem_allocation *descriptor = NULL;
-       u32 size = vma->vm_end - vma->vm_start;
+       mali_mem_allocation *mali_alloc = NULL;
        u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       int ret = -EFAULT;
 
        session = (struct mali_session_data *)filp->private_data;
        if (NULL == session) {
@@ -147,8 +144,8 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma)
        }
 
        MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
-                            (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
-                            (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+                            (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+                            (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
 
        /* Set some bits which indicate that, the memory is IO memory, meaning
         * that no paging is to be performed and the memory should not be
@@ -159,7 +156,7 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma)
        vma->vm_flags |= VM_IO;
        vma->vm_flags |= VM_DONTCOPY;
        vma->vm_flags |= VM_PFNMAP;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
        vma->vm_flags |= VM_RESERVED;
 #else
        vma->vm_flags |= VM_DONTDUMP;
@@ -167,68 +164,71 @@ int mali_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
 
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-       vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
-
-   /// since in ALPS project, especially low-memory segment, 
-   /// it would be hard to allocate a 256KB(2^6 * 4K) physical continuous memory due to memory fragmentation      
-   /// even 32KB conti. phy. might be hard to allocate. And it might cause ANR or KE
-   /// avoid using block allocate(256KB) directly
-       /// descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
-       if (NULL == descriptor) {
-               descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
-               if (NULL == descriptor) {
-                       MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
-                       return -ENOMEM;
+       vma->vm_ops = &mali_kernel_vm_ops;
+       /* Operations used on any memory system */
+
+       /* Find the Mali allocation structure by virtual address */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+       if (likely(mali_vma_node)) {
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+               if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
+                       /* only allow to use start address for mmap */
+                       /* Only the allocation's start address may be used for mmap */
                }
+       } else {
+               MALI_DEBUG_ASSERT(NULL == mali_vma_node);
+               return -EFAULT;
        }
 
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
-
-       vma->vm_private_data = (void*)descriptor;
+       mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
 
-       /* Put on descriptor map */
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_os_release(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
+       /* Get backend memory & Map on CPU */
+       mutex_lock(&mali_idr_mutex);
+       if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
+               MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+               mutex_unlock(&mali_idr_mutex);
                return -EFAULT;
        }
-
-       return 0;
-}
-
-
-/* Prepare memory descriptor */
-mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type)
-{
-       mali_mem_allocation *descriptor;
-
-       descriptor = (mali_mem_allocation*)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
-       if (NULL == descriptor) {
-               MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n"));
-               return NULL;
+       mutex_unlock(&mali_idr_mutex);
+
+       /* If it's a copy-on-write mapping, map to read only */
+       if (!(vma->vm_flags & VM_WRITE)) {
+               MALI_DEBUG_PRINT(4, ("mmap allocation with read only!\n"));
+               /* Set VM_WRITE anyway: do_page_fault checks it when a write fault occurs */
+               vma->vm_flags |= VM_WRITE | VM_READ;
+               vma->vm_page_prot = PAGE_READONLY;
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+               mem_bkend->cow_flag |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
+               goto out;
        }
 
-       MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+       if (mem_bkend->type == MALI_MEM_OS) {
+               ret = mali_mem_os_cpu_map(mem_bkend, vma);
+       } else if (mem_bkend->type == MALI_MEM_COW) {
+               ret = mali_mem_cow_cpu_map(mem_bkend, vma);
+       } else if (mem_bkend->type == MALI_MEM_BLOCK) {
+               ret = mali_mem_block_cpu_map(mem_bkend, vma);
+       } else {
+               /* Not supported yet */
+               MALI_DEBUG_ASSERT(0);
+       }
 
-       descriptor->flags = 0;
-       descriptor->type = type;
-       descriptor->session = session;
+       if (ret != 0)
+               return -EFAULT;
+out:
+       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);
 
-       return descriptor;
-}
+       vma->vm_private_data = (void *)mali_alloc;
+       mali_alloc->cpu_mapping.vma = vma;
+       mali_allocation_ref(mali_alloc);
 
-void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor)
-{
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
-       MALI_DEBUG_CODE(descriptor->magic = MALI_MEM_ALLOCATION_FREED_MAGIC);
-
-       kfree(descriptor);
+       return 0;
 }
 
 _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
 {
-       u32 size = descriptor->size;
+       u32 size = descriptor->psize;
        struct mali_session_data *session = descriptor->session;
 
        MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
@@ -239,85 +239,64 @@ _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
                size += MALI_MMU_PAGE_SIZE;
        }
 
-       return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_mapping.addr, size);
+       return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
 }
 
-void mali_mem_mali_map_free(mali_mem_allocation *descriptor)
-{
-       u32 size = descriptor->size;
-       struct mali_session_data *session = descriptor->session;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
 
-       if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
+{
+       if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
                size += MALI_MMU_PAGE_SIZE;
        }
 
        /* Umap and flush L2 */
-       mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, descriptor->size);
-
-       mali_scheduler_zap_all_active(session);
+       mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
+       mali_executor_zap_all_active(session);
 }
 
 u32 _mali_ukk_report_memory_usage(void)
 {
        u32 sum = 0;
 
-       sum += mali_mem_block_allocator_stat();
+       if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+               sum += mali_mem_block_allocator_stat();
+       }
+
        sum += mali_mem_os_stat();
 
        return sum;
 }
 
+u32 _mali_ukk_report_total_memory_size(void)
+{
+       return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
 /**
  * Per-session memory descriptor mapping table sizes
  */
 #define MALI_MEM_DESCRIPTORS_INIT 64
 #define MALI_MEM_DESCRIPTORS_MAX 65536
 
-_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
 {
        MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
 
-       /* Create descriptor mapping table */
-       session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
-
-       if (NULL == session_data->descriptor_mapping) {
-               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
-       }
-
        session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
-                                   _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+                                   _MALI_OSK_LOCK_ORDER_MEM_SESSION);
 
        if (NULL == session_data->memory_lock) {
-               mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
                _mali_osk_free(session_data);
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }
 
+       mali_memory_manager_init(&session_data->allocation_mgr);
+
        MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
        MALI_SUCCESS;
 }
 
-/** @brief Callback function that releases memory
- *
- * session->memory_lock must be held when calling this function.
- */
-static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
-{
-       mali_mem_allocation *descriptor;
-
-       descriptor = (mali_mem_allocation*)map_target;
-
-       MALI_DEBUG_ASSERT_LOCK_HELD(descriptor->session->memory_lock);
-
-       MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
-       MALI_DEBUG_ASSERT(descriptor);
-
-       mali_mem_release(descriptor);
-       mali_mem_descriptor_destroy(descriptor);
-}
-
 void mali_memory_session_end(struct mali_session_data *session)
 {
        MALI_DEBUG_PRINT(3, ("MMU session end\n"));
@@ -326,18 +305,10 @@ void mali_memory_session_end(struct mali_session_data *session)
                MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
                return;
        }
-
-       /* Lock the session so we can modify the memory list */
-       _mali_osk_mutex_wait(session->memory_lock);
-
-       /* Free all allocations still in the descriptor map, and terminate the map */
-       if (NULL != session->descriptor_mapping) {
-               mali_descriptor_mapping_call_for_each(session->descriptor_mapping, descriptor_table_cleanup_callback);
-               mali_descriptor_mapping_destroy(session->descriptor_mapping);
-               session->descriptor_mapping = NULL;
-       }
-
-       _mali_osk_mutex_signal(session->memory_lock);
+       /* Free all allocations still owned by this session */
+       mali_free_session_allocations(session);
+       /* Perform consistency checks during uninit */
+       mali_memory_manager_uninit(&session->allocation_mgr);
 
        /* Free the lock */
        _mali_osk_mutex_term(session->memory_lock);
@@ -345,17 +316,112 @@ void mali_memory_session_end(struct mali_session_data *session)
        return;
 }
 
-
-extern unsigned int (*mtk_get_gpu_memory_usage_fp)(void);
-
 _mali_osk_errcode_t mali_memory_initialize(void)
 {
-    mtk_get_gpu_memory_usage_fp = _mali_ukk_report_memory_usage;
+       idr_init(&mali_backend_idr);
+       mutex_init(&mali_idr_mutex);
        return mali_mem_os_init();
 }
 
 void mali_memory_terminate(void)
 {
        mali_mem_os_term();
-       mali_mem_block_allocator_destroy(NULL);
+       if (mali_memory_have_dedicated_memory()) {
+               mali_mem_block_allocator_destroy();
+       }
+}
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
+{
+       mali_page_node *page_node = NULL;
+
+       page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
+       MALI_DEBUG_ASSERT(NULL != page_node);
+
+       if (page_node) {
+               page_node->type = type;
+               INIT_LIST_HEAD(&page_node->list);
+       }
+
+       return page_node;
+}
+
+void _mali_page_node_ref(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* add ref to this page */
+               get_page(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               mali_mem_block_add_ref(node);
+       } else
+               MALI_DEBUG_ASSERT(0);
 }
+
+void _mali_page_node_unref(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* Drop a reference to this page */
+               put_page(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               mali_mem_block_dec_ref(node);
+       } else
+               MALI_DEBUG_ASSERT(0);
+}
+
+
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
+{
+       MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
+       node->page = page;
+}
+
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
+{
+       MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
+       node->blk_it = item;
+}
+
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               /* get ref count of this page */
+               return page_count(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               return mali_mem_block_get_ref_count(node);
+       } else {
+               MALI_DEBUG_ASSERT(0);
+       }
+       return -1;
+}
+
+
+dma_addr_t _mali_page_node_get_phy_addr(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               return page_private(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               return _mali_blk_item_get_phy_addr(node->blk_it);
+       } else {
+               MALI_DEBUG_ASSERT(0);
+       }
+       return 0;
+}
+
+
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
+{
+       if (node->type == MALI_PAGE_NODE_OS) {
+               return page_to_pfn(node->page);
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+               /* Derive the pfn of a BLOCK page from its physical address */
+               return _mali_blk_item_get_pfn(node->blk_it);
+       } else {
+               MALI_DEBUG_ASSERT(0);
+       }
+       return 0;
+}
+
+
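
The _mali_page_node_* helpers above give OS pages and dedicated-memory blocks a
common interface. A minimal sketch of their intended use for an OS page; the
wrapper function is illustrative, not part of this import:

/* Illustrative only: wrap a struct page in a mali_page_node and query it */
static int example_wrap_os_page(struct page *page)
{
	struct mali_page_node *node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);

	if (NULL == node)
		return -ENOMEM;

	_mali_page_node_add_page(node, page);	/* node->page = page */
	_mali_page_node_ref(node);		/* get_page() under the hood */
	MALI_DEBUG_PRINT(4, ("pfn=%lu\n", _mali_page_node_get_pfn(node)));
	_mali_page_node_unref(node);		/* put_page() */
	kfree(node);
	return 0;
}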
index 95aeba4f17105ea93b7e20307267f04f7b661a7c..49c2de9dd73e5dbd2d6d79e1424050603f7d9e5a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -32,7 +32,8 @@ void mali_memory_terminate(void);
  * @param table_page GPU pointer to the allocated page
  * @param mapping CPU pointer to the mapping of the allocated page
  */
-MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
 {
        return mali_mem_os_get_table_page(table_page, mapping);
 }
@@ -43,7 +44,8 @@ MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page,
  *
  * @param pa the GPU address of the page to release
  */
-MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
 {
        mali_mem_os_release_table_page(phys, virt);
 }
@@ -56,22 +58,6 @@ MALI_STATIC_INLINE void mali_mmu_release_table_page(u32 phys, void *virt)
  */
 int mali_mmap(struct file *filp, struct vm_area_struct *vma);
 
-/** @brief Allocate and initialize a Mali memory descriptor
- *
- * @param session Pointer to the session allocating the descriptor
- * @param type Type of memory the descriptor will represent
- */
-mali_mem_allocation *mali_mem_descriptor_create(struct mali_session_data *session, mali_mem_type type);
-
-/** @brief Destroy a Mali memory descriptor
- *
- * This function will only free the descriptor itself, and not the memory it
- * represents.
- *
- * @param descriptor Pointer to the descriptor to destroy
- */
-void mali_mem_descriptor_destroy(mali_mem_allocation *descriptor);
-
 /** @brief Start a new memory session
  *
  * Called when a process opens the Mali device node.
@@ -110,7 +96,7 @@ _mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
  *
  * @param descriptor Pointer to the memory descriptor to unmap
  */
-void mali_mem_mali_map_free(mali_mem_allocation *descriptor);
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags);
 
 /** @brief Parse resource and prepare the OS memory allocator
  *
@@ -128,7 +114,16 @@ _mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
 _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
 
 
-void mali_mem_ump_release(mali_mem_allocation *descriptor);
-void mali_mem_external_release(mali_mem_allocation *descriptor);
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type);
+
+void _mali_page_node_ref(struct mali_page_node *node);
+void _mali_page_node_unref(struct mali_page_node *node);
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page);
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item);
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node);
+dma_addr_t _mali_page_node_get_phy_addr(struct mali_page_node *node);
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node);
 
 #endif /* __MALI_MEMORY_H__ */
index e8c890e9a16bb031ff3865dd4b7cdf2d66705866..6bbe47df9f947cec569446bace289e132a817f59 100644 (file)
@@ -1,48 +1,71 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
+
 #include "mali_kernel_common.h"
 #include "mali_memory.h"
 #include "mali_memory_block_alloc.h"
 #include "mali_osk.h"
 #include <linux/mutex.h>
-#define MALI_BLOCK_SIZE (256UL * 1024UL)  /* 256 kB, remember to keep the ()s */
 
-struct block_info {
-       struct block_info *next;
-};
 
-typedef struct block_info block_info;
+static mali_block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
+{
+       return (item->phy_addr & ~(MALI_BLOCK_REF_MASK));
+}
 
 
-typedef struct block_allocator {
-       struct mutex mutex;
-       block_info *all_blocks;
-       block_info *first_free;
-       u32 base;
-       u32 cpu_usage_adjust;
-       u32 num_blocks;
-       u32 free_blocks;
-} block_allocator;
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
+{
+       return (item->phy_addr / MALI_BLOCK_SIZE);
+}
+
+
+u32 mali_mem_block_get_ref_count(mali_page_node *node)
+{
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
+}
+
 
-static block_allocator *mali_mem_block_gobal_allocator = NULL;
+/* Increase the reference count.
+ * This is not atomic, so the caller must take sp_lock before calling it.
+ */
 
-MALI_STATIC_INLINE u32 get_phys(block_allocator *info, block_info *block)
+u32 mali_mem_block_add_ref(mali_page_node *node)
 {
-       return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
+       return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
 }
 
-mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size)
+/* Decrease the reference count.
+ * This is not atomic, so the caller must take sp_lock before calling it.
+ */
+u32 mali_mem_block_dec_ref(mali_page_node *node)
 {
-       block_allocator *info;
+       MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+       MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
+       return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
+}
+
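
The three helpers above pack a per-block reference count into the low bits of
phy_addr: every block address is MALI_BLOCK_SIZE aligned, so the bits selected by
MALI_BLOCK_REF_MASK are otherwise always zero and can double as a counter (which
is why add/dec simply use phy_addr++ and phy_addr--). A minimal sketch of the
packing scheme; the function names are illustrative:

/* Illustrative only: pack/unpack a block address and its ref count */
static unsigned long example_pack(unsigned long base, unsigned long refs)
{
	/* base must be MALI_BLOCK_SIZE aligned; refs must fit in the mask */
	return (base & ~MALI_BLOCK_REF_MASK) | (refs & MALI_BLOCK_REF_MASK);
}

static void example_unpack(unsigned long packed,
			   unsigned long *base, unsigned long *refs)
{
	*base = packed & ~MALI_BLOCK_REF_MASK;
	*refs = packed & MALI_BLOCK_REF_MASK;
}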
+
+static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
+{
+       mali_block_allocator *info;
        u32 usable_size;
        u32 num_blocks;
+       mali_page_node *m_node;
+       mali_block_item *mali_blk_items = NULL;
+       int i = 0;
 
        usable_size = size & ~(MALI_BLOCK_SIZE - 1);
        MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
@@ -55,246 +78,253 @@ mali_mem_allocator *mali_mem_block_allocator_create(u32 base_address, u32 cpu_us
                return NULL;
        }
 
-       info = _mali_osk_malloc(sizeof(block_allocator));
+       info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
        if (NULL != info) {
-               mutex_init(&info->mutex);
-               info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
-               if (NULL != info->all_blocks) {
-                       u32 i;
-                       info->first_free = NULL;
-                       info->num_blocks = num_blocks;
-                       info->free_blocks = num_blocks;
-
-                       info->base = base_address;
-                       info->cpu_usage_adjust = cpu_usage_adjust;
-
-                       for ( i = 0; i < num_blocks; i++) {
-                               info->all_blocks[i].next = info->first_free;
-                               info->first_free = &info->all_blocks[i];
+               INIT_LIST_HEAD(&info->free);
+               spin_lock_init(&info->sp_lock);
+               info->total_num = num_blocks;
+               mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
+
+               if (mali_blk_items) {
+                       info->items = mali_blk_items;
+                       /* Add the blocks (4 kB each) to the free list */
+                       for (i = 0 ; i < num_blocks ; i++) {
+                               /* Record the block's physical address */
+                               mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
+                               /* Add the block to the free list */
+                               m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                               if (m_node == NULL)
+                                       goto fail;
+                               _mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
+                               list_add_tail(&m_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
                        }
-
-                       return (mali_mem_allocator *)info;
+                       return info;
                }
-               _mali_osk_free(info);
        }
-
+fail:
+       mali_mem_block_allocator_destroy();
        return NULL;
 }
 
-void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator)
+void mali_mem_block_allocator_destroy(void)
 {
-       block_allocator *info = (block_allocator*)allocator;
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
+       MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
 
-       info = mali_mem_block_gobal_allocator;
-       if (NULL == info) return;
+       if (NULL == info)
+               return;
 
-       MALI_DEBUG_ASSERT_POINTER(info);
+       list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               list_del(&m_page->list);
+               kfree(m_page);
+       }
 
-       _mali_osk_free(info->all_blocks);
+       _mali_osk_free(info->items);
        _mali_osk_free(info);
 }
 
-static void mali_mem_block_mali_map(mali_mem_allocation *descriptor, u32 phys, u32 virt, u32 size)
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
 {
-       struct mali_page_directory *pagedir = descriptor->session->page_directory;
-       u32 prop = descriptor->mali_mapping.properties;
-       u32 offset = 0;
-
-       while (size) {
-               mali_mmu_pagedir_update(pagedir, virt + offset, phys + offset, MALI_MMU_PAGE_SIZE, prop);
-
-               size -= MALI_MMU_PAGE_SIZE;
-               offset += MALI_MMU_PAGE_SIZE;
-       }
+       mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_block_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
+       mutex_unlock(&mem_bkend->mutex);
+       return free_pages_nr;
 }
 
-static int mali_mem_block_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma, u32 mali_phys, u32 mapping_offset, u32 size, u32 cpu_usage_adjust)
-{
-       u32 virt = vma->vm_start + mapping_offset;
-       u32 cpu_phys = mali_phys + cpu_usage_adjust;
-       u32 offset = 0;
-       int ret;
 
-       while (size) {
-               ret = vm_insert_pfn(vma, virt + offset, __phys_to_pfn(cpu_phys + offset));
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(info);
 
-               if (unlikely(ret)) {
-                       MALI_DEBUG_PRINT(1, ("Block allocator: Failed to insert pfn into vma\n"));
-                       return 1;
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
+       /* Initialize the allocation's pfn list */
+       INIT_LIST_HEAD(&block_mem->pfns);
+
+       spin_lock(&info->sp_lock);
+       /*check if have enough space*/
+       /* Check whether enough free blocks are available */
+               list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+                       if (page_count > 0) {
+                               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+                               MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0);
+                               list_move(&m_page->list, &block_mem->pfns);
+                               block_mem->count++;
+                               atomic_dec(&info->free_num);
+                               _mali_page_node_ref(m_page);
+                       } else {
+                               break;
+                       }
+                       page_count--;
                }
-
-               size -= MALI_MMU_PAGE_SIZE;
-               offset += MALI_MMU_PAGE_SIZE;
+       } else {
+               /* Not enough free blocks: cannot allocate from BLOCK memory */
+               spin_unlock(&info->sp_lock);
+               return -1;
        }
 
+       spin_unlock(&info->sp_lock);
        return 0;
 }
 
-mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
 {
-       _mali_osk_errcode_t err;
-       mali_mem_allocation *descriptor;
-       block_allocator *info;
-       u32 left;
-       block_info *last_allocated = NULL;
-       block_allocator_allocation *ret_allocation;
-       u32 offset = 0;
-
-       size = ALIGN(size, MALI_BLOCK_SIZE);
-
-       info = mali_mem_block_gobal_allocator;
-       if (NULL == info) return NULL;
+       u32 free_pages_nr = 0;
 
-       left = size;
-       MALI_DEBUG_ASSERT(0 != left);
-
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
-       if (NULL == descriptor) {
-               return NULL;
-       }
+       free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
+       MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+       block_mem->count = 0;
+       MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
 
-       descriptor->mali_mapping.addr = mali_addr;
-       descriptor->size = size;
-       descriptor->cpu_mapping.addr = (void __user*)vma->vm_start;
-       descriptor->cpu_mapping.ref = 1;
-
-       if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
-       } else {
-               /* Cached Mali memory mapping */
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
-               vma->vm_flags |= VM_SHARED;
-       }
-
-       ret_allocation = &descriptor->block_mem.mem;
-
-       ret_allocation->mapping_length = 0;
-
-       _mali_osk_mutex_wait(session->memory_lock);
-       mutex_lock(&info->mutex);
+       return free_pages_nr;
+}
 
-       if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
-               MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
-               mutex_unlock(&info->mutex);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
-               return NULL;
-       }
 
-       err = mali_mem_mali_map_prepare(descriptor);
-       if (_MALI_OSK_ERR_OK != err) {
-               mutex_unlock(&info->mutex);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
-               return NULL;
+u32 mali_mem_block_free_list(struct list_head *list)
+{
+       struct mali_page_node *m_page, *m_tmp;
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       u32 free_pages_nr = 0;
+
+       if (info) {
+               spin_lock(&info->sp_lock);
+               list_for_each_entry_safe(m_page, m_tmp , list, list) {
+                       if (1 == _mali_page_node_get_ref_count(m_page)) {
+                               free_pages_nr++;
+                       }
+                       mali_mem_block_free_node(m_page);
+               }
+               spin_unlock(&info->sp_lock);
        }
+       return free_pages_nr;
+}
 
-       while ((left > 0) && (info->first_free)) {
-               block_info *block;
-               u32 phys_addr;
-               u32 current_mapping_size;
-
-               block = info->first_free;
-               info->first_free = info->first_free->next;
-               block->next = last_allocated;
-               last_allocated = block;
-
-               phys_addr = get_phys(info, block);
-
-               if (MALI_BLOCK_SIZE < left) {
-                       current_mapping_size = MALI_BLOCK_SIZE;
+/* Free the node */
+void mali_mem_block_free_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /* TODO: does this need to be atomic? */
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /* Last reference: move the node back to the free list */
+                       _mali_page_node_unref(node);
+                       list_move_tail(&node->list, &info->free);
+                       atomic_add(1, &info->free_num);
                } else {
-                       current_mapping_size = left;
+                       _mali_page_node_unref(node);
+                       list_del(&node->list);
+                       kfree(node);
                }
+       }
+}
 
-               mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
-               if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
-                       /* release all memory back to the pool */
-                       while (last_allocated) {
-                               /* This relinks every block we've just allocated back into the free-list */
-                               block = last_allocated->next;
-                               last_allocated->next = info->first_free;
-                               info->first_free = last_allocated;
-                               last_allocated = block;
-                       }
-
-                       mutex_unlock(&info->mutex);
-                       _mali_osk_mutex_signal(session->memory_lock);
-
-                       mali_mem_mali_map_free(descriptor);
-                       mali_mem_descriptor_destroy(descriptor);
+/* Unref the node, but do not free it */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+       mali_block_allocator *info = mali_mem_block_gobal_allocator;
+       mali_page_node *new_node;
+
+       /* only handle BLOCK node */
+       if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+               /*Need to make this atomic?*/
+               if (1 == _mali_page_node_get_ref_count(node)) {
+                       /* Allocate a new node for the free list and keep the old node */
+                       _mali_page_node_unref(node);
+                       new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+                       if (new_node) {
+                               memcpy(new_node, node, sizeof(mali_page_node));
+                               list_add(&new_node->list, &info->free);
+                               atomic_add(1, &info->free_num);
+                       } else
+                               return _MALI_OSK_ERR_FAULT;
 
-                       return NULL;
+               } else {
+                       _mali_page_node_unref(node);
                }
-
-               left -= current_mapping_size;
-               offset += current_mapping_size;
-               ret_allocation->mapping_length += current_mapping_size;
-
-               --info->free_blocks;
        }
+       return _MALI_OSK_ERR_OK;
+}
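
The two functions above implement one refcount contract: dropping the last reference parks a block node on the allocator's free list, while earlier drops only decrement the count (or, for `unref_node`, park a duplicate). A minimal userspace model of that contract, with a hypothetical `node` type and plain ints standing in for `mali_page_node`, its atomics, and the spinlock:

```c
/* Minimal userspace model of the refcount contract (hypothetical types;
 * the driver uses mali_page_node, atomics and a spinlock-protected list). */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int ref_count;           /* 0 while parked on the free list */
	struct node *next_free;  /* singly linked free list, for brevity */
};

static struct node *free_list;
static int free_num;

/* Drop one reference; the last drop recycles the node onto the free list. */
static void node_put(struct node *n)
{
	if (--n->ref_count == 0) {
		n->next_free = free_list;
		free_list = n;
		free_num++;
	}
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	if (!a)
		return 1;
	a->ref_count = 2;  /* e.g. original owner plus one COW reference */
	node_put(a);       /* still referenced elsewhere: not recycled */
	node_put(a);       /* last reference: parked on the free list */
	printf("free_num = %d\n", free_num); /* prints 1 */
	free(a);
	return 0;
}
```
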
 
-       mutex_unlock(&info->mutex);
-       _mali_osk_mutex_signal(session->memory_lock);
-
-       MALI_DEBUG_ASSERT(0 == left);
 
-       /* Record all the information about this allocation */
-       ret_allocation->last_allocated = last_allocated;
-       ret_allocation->info = info;
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+       struct mali_page_directory *pagedir = session->page_directory;
+       struct mali_page_node *m_page;
+       dma_addr_t phys;
+       u32 virt = vaddr;
+       u32 prop = props;
+
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               phys = _mali_page_node_get_phy_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+               /* Verify that the "physical" address is 32-bit and
+                * usable for Mali, when on a system with bus addresses
+                * wider than 32-bit. */
+               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+               virt += MALI_MMU_PAGE_SIZE;
+       }
 
-       return descriptor;
+       return 0;
 }
 
-void mali_mem_block_release(mali_mem_allocation *descriptor)
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
 {
-       block_allocator *info = descriptor->block_mem.mem.info;
-       block_info *block, *next;
-       block_allocator_allocation *allocation = &descriptor->block_mem.mem;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == descriptor->type);
-
-       block = allocation->last_allocated;
-
-       MALI_DEBUG_ASSERT_POINTER(block);
-
-       /* unmap */
-       mali_mem_mali_map_free(descriptor);
-
-       mutex_lock(&info->mutex);
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
+}
 
-       while (block) {
-               MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
 
-               next = block->next;
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+       int ret;
+       mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
+       unsigned long addr = vma->vm_start;
+       struct mali_page_node *m_page;
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
 
-               /* relink into free-list */
-               block->next = info->first_free;
-               info->first_free = block;
+       list_for_each_entry(m_page, &block_mem->pfns, list) {
+               MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+               ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
 
-               /* advance the loop */
-               block = next;
+               if (unlikely(0 != ret)) {
+                       return -EFAULT;
+               }
+               addr += _MALI_OSK_MALI_PAGE_SIZE;
 
-               ++info->free_blocks;
        }
 
-       mutex_unlock(&info->mutex);
+       return 0;
 }
 
-u32 mali_mem_block_allocator_stat(void)
-{
-       block_allocator *info = (block_allocator *)mali_mem_block_gobal_allocator;
-
-       if (NULL == info) return 0;
-
-       MALI_DEBUG_ASSERT_POINTER(info);
-
-       return (info->num_blocks - info->free_blocks) * MALI_BLOCK_SIZE;
-}
 
 _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
 {
-       mali_mem_allocator *allocator;
+       mali_block_allocator *allocator;
 
        /* Do the low level linux operation first */
 
@@ -305,7 +335,7 @@ _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 si
        }
 
        /* Create generic block allocator object to handle it */
-       allocator = mali_mem_block_allocator_create(start, 0 /* cpu_usage_adjust */, size);
+       allocator = mali_mem_block_allocator_create(start, size);
 
        if (NULL == allocator) {
                MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
@@ -313,7 +343,20 @@ _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 si
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }
 
-       mali_mem_block_gobal_allocator = (block_allocator*)allocator;
+       mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator;
 
        return _MALI_OSK_ERR_OK;
 }
+
+mali_bool mali_memory_have_dedicated_memory(void)
+{
+       return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE;
+}
+
+u32 mali_mem_block_allocator_stat(void)
+{
+       mali_block_allocator *allocator = mali_mem_block_gobal_allocator;
+       MALI_DEBUG_ASSERT_POINTER(allocator);
+
+       return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
+}
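
`mali_mem_block_allocator_stat()` is pure bookkeeping: pages in use are the total minus the free count, scaled by the page size. A standalone sketch of the same arithmetic, with 4096 as an assumed value for `_MALI_OSK_MALI_PAGE_SIZE`:

```c
/* Standalone model of the allocator statistic: in-use bytes equal
 * (total pages - free pages) * page size. 4096 is an assumption for
 * _MALI_OSK_MALI_PAGE_SIZE. */
#include <stdio.h>

static unsigned int block_stat(unsigned int total_num, unsigned int free_num)
{
	return (total_num - free_num) * 4096u;
}

int main(void)
{
	/* a 1024-page bank with 1000 pages free -> 24 pages (98304 bytes) in use */
	printf("%u bytes in use\n", block_stat(1024, 1000));
	return 0;
}
```
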
index c0d412dc8010da2cb9cf6a6bcdc49faa0f1b693e..e8b34d2ab16feda74e26ac43fab6193133165574 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include "mali_session.h"
 #include "mali_memory.h"
+#include <linux/spinlock.h>
 
 #include "mali_memory_types.h"
 
-typedef struct mali_mem_allocator mali_mem_allocator;
-
-mali_mem_allocator *mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size);
-void mali_mem_block_allocator_destroy(mali_mem_allocator *allocator);
-
-mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
-void mali_mem_block_release(mali_mem_allocation *descriptor);
-
+#define MALI_BLOCK_SIZE (PAGE_SIZE)  /* 4 kB; BLOCK memory is managed at page granularity */
+#define MALI_BLOCK_REF_MASK (0xFFF)
+#define MALI_BLOCK_MAX_REF_COUNT (0xFFF)
+
+
+
+typedef struct mali_block_allocator {
+       /*
+       * On the free list every node's ref_count is 0;
+       * ref_count is raised when a node is allocated or referenced by COW.
+       */
+       mali_block_item *items; /* information for each block item */
+       struct list_head free; /* free list of mali_page_node */
+       spinlock_t sp_lock; /* lock for reference count & free list operations */
+       u32 total_num; /* number of total pages */
+       atomic_t free_num; /* number of free pages */
+} mali_block_allocator;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item);
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item);
+u32 mali_mem_block_get_ref_count(mali_page_node *node);
+u32 mali_mem_block_add_ref(mali_page_node *node);
+u32 mali_mem_block_dec_ref(mali_page_node *node);
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend);
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size);
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+mali_bool mali_memory_have_dedicated_memory(void);
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem);
+u32 mali_mem_block_free_list(struct list_head *list);
+void mali_mem_block_free_node(struct mali_page_node *node);
+void mali_mem_block_allocator_destroy(void);
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node);
 u32 mali_mem_block_allocator_stat(void);
 
 #endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.c
new file mode 100644 (file)
index 0000000..bf81a01
--- /dev/null
@@ -0,0 +1,571 @@
+/*\r
+ * This confidential and proprietary software may be used only as\r
+ * authorised by a licensing agreement from ARM Limited\r
+ * (C) COPYRIGHT 2013-2015 ARM Limited\r
+ * ALL RIGHTS RESERVED\r
+ * The entire notice above must be reproduced on all authorised\r
+ * copies and copies may only be made to the extent permitted\r
+ * by a licensing agreement from ARM Limited.\r
+ */\r
+#include <linux/mm.h>\r
+#include <linux/list.h>\r
+#include <linux/mm_types.h>\r
+#include <linux/fs.h>\r
+#include <linux/dma-mapping.h>\r
+#include <linux/highmem.h>\r
+#include <asm/cacheflush.h>\r
+#include <linux/sched.h>\r
+#ifdef CONFIG_ARM\r
+#include <asm/outercache.h>\r
+#endif\r
+#include <asm/dma-mapping.h>\r
+\r
+#include "mali_memory.h"\r
+#include "mali_kernel_common.h"\r
+#include "mali_uk_types.h"\r
+#include "mali_osk.h"\r
+#include "mali_kernel_linux.h"\r
+#include "mali_memory_cow.h"\r
+#include "mali_memory_block_alloc.h"\r
+\r
+/**\r
+* Allocate pages for the COW backend and flush the cache\r
+*/\r
+static struct page *mali_mem_cow_alloc_page(void)\r
+{\r
+       mali_mem_os_mem os_mem;\r
+       struct mali_page_node *node;\r
+       struct page *new_page;\r
+\r
+       int ret = 0;\r
+       /* allocate pages from os mem */\r
+       ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);\r
+\r
+       if (ret) {\r
+               return NULL;\r
+       }\r
+\r
+       MALI_DEBUG_ASSERT(1 == os_mem.count);\r
+\r
+       node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);\r
+       new_page = node->page;\r
+       node->page = NULL;\r
+       list_del(&node->list);\r
+       kfree(node);\r
+\r
+       return new_page;\r
+}\r
+\r
+\r
+static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,\r
+               u32 target_offset,\r
+               u32 target_size)\r
+{\r
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||\r
+                         MALI_MEM_BLOCK == target_bk->type);\r
+\r
+       if (MALI_MEM_OS == target_bk->type) {\r
+               MALI_DEBUG_ASSERT(&target_bk->os_mem);\r
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);\r
+               return &target_bk->os_mem.pages;\r
+       } else if (MALI_MEM_COW == target_bk->type) {\r
+               MALI_DEBUG_ASSERT(&target_bk->cow_mem);\r
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);\r
+               return  &target_bk->cow_mem.pages;\r
+       } else if (MALI_MEM_BLOCK == target_bk->type) {\r
+               MALI_DEBUG_ASSERT(&target_bk->block_mem);\r
+               MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);\r
+               return  &target_bk->block_mem.pfns;\r
+       }\r
+\r
+       return NULL;\r
+}\r
+\r
+/**\r
+* Do COW for OS memory - also supports COW of memory that comes from bank (block) memory.\r
+* range_start/range_size may be zero, in which case cow_modify_range is\r
+* expected to be called later.\r
+* This function allocates new pages from OS memory for the modified range of the\r
+* COW backend, and keeps (and takes a reference on) every page outside that range.\r
+*\r
+* @target_bk - target allocation's backend (the allocation to COW)\r
+* @target_offset - offset within the target allocation to COW (to support COW of memory allocated from a memory bank, 4K aligned)\r
+* @target_size - size of the target allocation to COW (to support memory bank)\r
+* @backend - COW backend\r
+* @range_start - offset of the modified range (4K aligned)\r
+* @range_size - size of the modified range\r
+*/\r
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,\r
+               u32 target_offset,\r
+               u32 target_size,\r
+               mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size)\r
+{\r
+       mali_mem_cow *cow = &backend->cow_mem;\r
+       struct mali_page_node *m_page, *m_tmp, *page_node;\r
+       int target_page = 0;\r
+       struct page *new_page;\r
+       struct list_head *pages = NULL;\r
+\r
+       pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);\r
+\r
+       if (NULL == pages) {\r
+               MALI_DEBUG_ASSERT(0);\r
+               return _MALI_OSK_ERR_FAULT;\r
+       }\r
+\r
+       MALI_DEBUG_ASSERT(0 == cow->count);\r
+\r
+       INIT_LIST_HEAD(&cow->pages);\r
+       mutex_lock(&target_bk->mutex);\r
+       list_for_each_entry_safe(m_page, m_tmp, pages, list) {\r
+               /* add page from (target_offset,target_offset+size) to cow backend */\r
+               if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                   (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {\r
+\r
+                       /* allocate a new page node, always use OS memory for COW */\r
+                       page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);\r
+\r
+                       if (NULL == page_node) {\r
+                               mutex_unlock(&target_bk->mutex);\r
+                               goto error;\r
+                       }\r
+\r
+                       INIT_LIST_HEAD(&page_node->list);\r
+\r
+                       /* check if in the modified range*/\r
+                       if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                           (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {\r
+                               /* need to allocate a new page */\r
+                               /* To keep it simple, all COW memory is allocated from OS memory */\r
+                               new_page = mali_mem_cow_alloc_page();\r
+\r
+                               if (NULL == new_page) {\r
+                                       kfree(page_node);\r
+                                       mutex_unlock(&target_bk->mutex);\r
+                                       goto error;\r
+                               }\r
+\r
+                               _mali_page_node_add_page(page_node, new_page);\r
+                       } else {\r
+                               /* also handle the Block memory case */\r
+                               if (m_page->type != MALI_PAGE_NODE_BLOCK) {\r
+                                       _mali_page_node_add_page(page_node, m_page->page);\r
+                               } else {\r
+                                       page_node->type = MALI_PAGE_NODE_BLOCK;\r
+                                       _mali_page_node_add_block_item(page_node, m_page->blk_it);\r
+                               }\r
+\r
+                               /* add ref to this page */\r
+                               _mali_page_node_ref(m_page);\r
+                       }\r
+\r
+                       /* add it to COW backend page list */\r
+                       list_add_tail(&page_node->list, &cow->pages);\r
+                       cow->count++;\r
+               }\r
+               target_page++;\r
+       }\r
+       mutex_unlock(&target_bk->mutex);\r
+       return _MALI_OSK_ERR_OK;\r
+error:\r
+       mali_mem_cow_release(backend, MALI_FALSE);\r
+       return _MALI_OSK_ERR_FAULT;\r
+}\r
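
A compact userspace sketch of the sharing policy above, under the assumption that page indices stand in for real pages: pages inside the modified window get private copies for the COW backend, everything else is shared and merely gains a reference:

```c
/* Userspace sketch of the COW set-up policy: pages inside the modified
 * window are copied, pages outside are shared and gain a reference.
 * Indices stand in for real pages; every name here is illustrative. */
#include <stdio.h>

#define NPAGES 8

int main(void)
{
	int ref[NPAGES];                           /* per-page reference counts */
	unsigned range_start = 2, range_pages = 3; /* "modified" pages 2..4 */

	for (unsigned i = 0; i < NPAGES; i++) {
		ref[i] = 1;  /* reference held by the target backend */
		int copied = (i >= range_start && i < range_start + range_pages);
		if (!copied)
			ref[i]++;  /* shared with the COW backend: take a ref */
		printf("page %u: ref=%d %s\n", i, ref[i],
		       copied ? "(private copy for COW)" : "(shared)");
	}
	return 0;
}
```
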
+\r
+\r
+_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)\r
+{\r
+       if (node->type == MALI_PAGE_NODE_OS) {\r
+               return mali_mem_os_put_page(node->page);\r
+       } else if (node->type == MALI_PAGE_NODE_BLOCK) {\r
+               return mali_mem_block_unref_node(node);\r
+       } else\r
+               MALI_DEBUG_ASSERT(0);\r
+       return _MALI_OSK_ERR_FAULT;\r
+}\r
+\r
+\r
+/**\r
+* Modify a range of an existing COW backend\r
+* @backend - COW backend\r
+* @range_start - offset of the modified range (4K aligned)\r
+* @range_size - size of the modified range (in bytes)\r
+*/\r
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size)\r
+{\r
+       mali_mem_allocation *alloc = NULL;\r
+       mali_mem_cow *cow = &backend->cow_mem;\r
+       struct mali_page_node *m_page, *m_tmp;\r
+       LIST_HEAD(pages);\r
+       struct page *new_page;\r
+       u32 count = 0;\r
+       s32 change_pages_nr = 0;\r
+\r
+       if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       alloc = backend->mali_allocation;\r
+       MALI_DEBUG_ASSERT_POINTER(alloc);\r
+\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
+       MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);\r
+\r
+       mutex_lock(&backend->mutex);\r
+\r
+       /* free pages*/\r
+       list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {\r
+\r
+               /* check if in the modified range*/\r
+               if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&\r
+                   (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {\r
+                       new_page = mali_mem_cow_alloc_page();\r
+\r
+                       if (NULL == new_page) {\r
+                               goto error;\r
+                       }\r
+                       if (1 != _mali_page_node_get_ref_count(m_page))\r
+                               change_pages_nr++;\r
+                       /* unref old page*/\r
+                       if (_mali_mem_put_page_node(m_page)) {\r
+                               __free_page(new_page);\r
+                               goto error;\r
+                       }\r
+                       /* add new page*/\r
+                       /* always use OS for COW*/\r
+                       m_page->type = MALI_PAGE_NODE_OS;\r
+                       _mali_page_node_add_page(m_page, new_page);\r
+\r
+               }\r
+               count++;\r
+       }\r
+       cow->change_pages_nr  = change_pages_nr;\r
+       mutex_unlock(&backend->mutex);\r
+\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);\r
+\r
+       /* Zap the CPU mapping of the modified range, if there is one;\r
+        * those CPU mappings will be re-established in the page fault handler.\r
+        */\r
+       if (0 != alloc->cpu_mapping.addr) {\r
+               MALI_DEBUG_ASSERT(0 != alloc->backend_handle);\r
+               MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);\r
+               MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);\r
+               zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);\r
+       }\r
+       return _MALI_OSK_ERR_OK;\r
+error:\r
+       mutex_unlock(&backend->mutex);\r
+       return _MALI_OSK_ERR_FAULT;\r
+\r
+}\r
+\r
+\r
+/**\r
+* Allocate pages for a COW backend\r
+* @alloc - allocation for the COW allocation\r
+* @target_bk - target allocation's backend (the allocation to COW)\r
+* @target_offset - offset within the target allocation to COW (to support COW of memory allocated from a memory bank, 4K aligned)\r
+* @target_size - size of the target allocation to COW (to support memory bank, in bytes)\r
+* @backend - COW backend\r
+* @range_start - offset of the modified range (4K aligned)\r
+* @range_size - size of the modified range (in bytes)\r
+*/\r
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,\r
+                                      u32 target_offset,\r
+                                      u32 target_size,\r
+                                      mali_mem_backend *backend,\r
+                                      u32 range_start,\r
+                                      u32 range_size)\r
+{\r
+       struct mali_session_data *session = backend->mali_allocation->session;\r
+\r
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       /* size & offset must be a multiple of the system page size */\r
+       if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+       if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);\r
+\r
+       /* check backend type */\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
+\r
+       switch (target_bk->type) {\r
+       case MALI_MEM_OS:\r
+       case MALI_MEM_COW:\r
+       case MALI_MEM_BLOCK:\r
+               return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);\r
+               break;\r
+       case MALI_MEM_EXTERNAL:\r
+               /*NOT support yet*/\r
+               MALI_DEBUG_ASSERT(0);\r
+               break;\r
+       case MALI_MEM_DMA_BUF:\r
+               /*NOT support yet*/\r
+               MALI_DEBUG_ASSERT(0);\r
+               break;\r
+       case MALI_MEM_UMP:\r
+               /*NOT support yet*/\r
+               MALI_DEBUG_ASSERT(0);\r
+               break;\r
+       default:\r
+               /*Not support yet*/\r
+               MALI_DEBUG_ASSERT(0);\r
+               break;\r
+       }\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
+\r
+\r
+/**\r
+* Map COW backend memory to Mali\r
+* Supports both OS and BLOCK mali_page_node types\r
+*/\r
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)\r
+{\r
+       mali_mem_allocation *cow_alloc;\r
+       struct mali_page_node *m_page;\r
+       struct mali_session_data *session;\r
+       struct mali_page_directory *pagedir;\r
+       u32 virt, start;\r
+\r
+       cow_alloc = mem_bkend->mali_allocation;\r
+       virt = cow_alloc->mali_vma_node.vm_node.start;\r
+       start = virt;\r
+\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+       MALI_DEBUG_ASSERT_POINTER(cow_alloc);\r
+\r
+       session = cow_alloc->session;\r
+       pagedir = session->page_directory;\r
+       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);\r
+       list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {\r
+               if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {\r
+                       dma_addr_t phys = _mali_page_node_get_phy_addr(m_page);\r
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)\r
+                       MALI_DEBUG_ASSERT(0 == (phys >> 32));\r
+#endif\r
+                       mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,\r
+                                               MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);\r
+               }\r
+               virt += MALI_MMU_PAGE_SIZE;\r
+       }\r
+       return 0;\r
+}\r
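
The mapping loop walks the backend's page list in GPU-virtual order and only reprograms entries whose offset falls inside `[range_start, range_start + range_size)`. A self-contained model of that windowing, with `map_page()` as a hypothetical stand-in for `mali_mmu_pagedir_update()`:

```c
/* Self-contained model of the windowed mapping loop: advance one page per
 * node and only "map" offsets inside [range_start, range_start + range_size).
 * map_page() is a hypothetical stand-in for mali_mmu_pagedir_update(). */
#include <stdio.h>

#define PAGE_SZ 0x1000u

static void map_page(unsigned int virt) { printf("map 0x%x\n", virt); }

static void map_window(unsigned int start, unsigned int npages,
                       unsigned int range_start, unsigned int range_size)
{
	unsigned int virt = start;
	for (unsigned int i = 0; i < npages; i++, virt += PAGE_SZ) {
		unsigned int off = virt - start;
		if (off >= range_start && off < range_start + range_size)
			map_page(virt);
	}
}

int main(void)
{
	/* remap only pages 1 and 2 of a 4-page allocation */
	map_window(0x10000000u, 4, PAGE_SZ, 2 * PAGE_SZ);
	return 0;
}
```
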
+\r
+/**\r
+* Map COW backend to the CPU\r
+* Supports OS and BLOCK memory\r
+*/\r
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)\r
+{\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       struct mali_page_node *m_page;\r
+       int ret;\r
+       unsigned long addr = vma->vm_start;\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
+\r
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               /* We should use vm_insert_page, but it does a dcache\r
+                * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.\r
+               ret = vm_insert_page(vma, addr, page);\r
+               */\r
+               ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));\r
+\r
+               if (unlikely(0 != ret)) {\r
+                       return ret;\r
+               }\r
+               addr += _MALI_OSK_MALI_PAGE_SIZE;\r
+       }\r
+\r
+       return 0;\r
+}\r
+\r
+/**\r
+* Map some pages (COW backend) into the CPU vma at vaddr\r
+* @mem_bkend - COW backend\r
+* @vma - CPU vma to map into\r
+* @vaddr - start CPU virtual address to map at\r
+* @num - max number of pages to map\r
+*/\r
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,\r
+               struct vm_area_struct *vma,\r
+               unsigned long vaddr,\r
+               int num)\r
+{\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       struct mali_page_node *m_page;\r
+       int ret;\r
+       int offset;\r
+       int count = 0;\r
+       unsigned long vstart = vma->vm_start;\r
+       MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);\r
+       MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);\r
+       MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);\r
+       offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;\r
+\r
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               if ((count >= offset) && (count < offset + num)) {\r
+                       ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));\r
+\r
+                       if (unlikely(0 != ret)) {\r
+                               if (count == offset) {\r
+                                       return _MALI_OSK_ERR_FAULT;\r
+                               } else {\r
+                                       /* ret is -EBUSY when the page was already mapped (it was outside the modified range); past the first page that is OK */\r
+                                       return _MALI_OSK_ERR_OK;\r
+                               }\r
+                       }\r
+                       vaddr += _MALI_OSK_MALI_PAGE_SIZE;\r
+               }\r
+               count++;\r
+       }\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
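
The error handling here is deliberately asymmetric: failing on the very first page is fatal, while a later failure (typically `-EBUSY` from `vm_insert_pfn()` because the PTE already exists) ends the loop with success. A runnable model of that rule, with `insert_pfn()` as a made-up stand-in:

```c
/* Runnable model of the "first failure is fatal, later failures are fine"
 * rule. insert_pfn() is a made-up stand-in that pretends pages >= 3 are
 * already mapped (vm_insert_pfn() would return -EBUSY for those). */
#include <stdio.h>
#include <errno.h>

static int insert_pfn(int idx) { return idx >= 3 ? -EBUSY : 0; }

static int map_pages(int offset, int num)
{
	for (int i = offset; i < offset + num; i++) {
		int ret = insert_pfn(i);
		if (ret != 0)
			return (i == offset) ? ret : 0; /* only the first page is fatal */
	}
	return 0;
}

int main(void)
{
	printf("map from 1: %d\n", map_pages(1, 4)); /* 0: stopped at page 3 */
	printf("map from 3: %d\n", map_pages(3, 2)); /* -16 (-EBUSY): first page failed */
	return 0;
}
```
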
+\r
+/**\r
+* Release COW backend memory\r
+* Free it directly (put_page / unref page) rather than returning it to the pool\r
+*/\r
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)\r
+{\r
+       mali_mem_allocation *alloc;\r
+       u32 free_pages_nr = 0;\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+       alloc = mem_bkend->mali_allocation;\r
+       MALI_DEBUG_ASSERT_POINTER(alloc);\r
+       /* Unmap the memory from the mali virtual address space. */\r
+       if (MALI_TRUE == is_mali_mapped)\r
+               mali_mem_os_mali_unmap(alloc);\r
+       /* free cow backend list*/\r
+       free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);\r
+       free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);\r
+       MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));\r
+\r
+       MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,\r
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));\r
+\r
+       mem_bkend->cow_mem.count = 0;\r
+       return free_pages_nr;\r
+}\r
+\r
+\r
+/* dst is always OS memory */\r
+void _mali_mem_cow_copy_page(mali_page_node *src_node, struct page *new_page)\r
+{\r
+       void *dst, *src;\r
+       MALI_DEBUG_ASSERT(src_node != NULL);\r
+\r
+       dma_unmap_page(&mali_platform_device->dev, page_private(new_page),\r
+                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);\r
+       /* map it and copy the contents */\r
+\r
+       dst = kmap_atomic(new_page);\r
+\r
+       if (src_node->type == MALI_PAGE_NODE_OS) {\r
+               struct page *src_page = src_node->page;\r
+               /* clear cache */\r
+\r
+               dma_unmap_page(&mali_platform_device->dev, page_private(src_page),\r
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);\r
+               src = kmap_atomic(src_page);\r
+#ifdef CONFIG_ARM\r
+               /* There seems to be a cache coherence issue when kmap is used\r
+               * to map src_page; invalidate the L2 cache here.\r
+               */\r
+               outer_inv_range(page_to_phys(src_page), page_to_phys(src_page) + _MALI_OSK_MALI_PAGE_SIZE);\r
+#else\r
+               /* Use dma_sync_single_for_cpu on arm64: there is no HIGHMEM\r
+               * on AArch64, so this works.\r
+               */\r
+               dma_sync_single_for_cpu(&mali_platform_device->dev, page_private(src_page),\r
+                                       _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);\r
+#endif\r
+               memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);\r
+               kunmap_atomic(src);\r
+               dma_map_page(&mali_platform_device->dev, src_page,\r
+                            0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);\r
+       } else if (src_node->type == MALI_PAGE_NODE_BLOCK) {\r
+               /*\r
+               * use ioremap to map src for BLOCK memory\r
+               */\r
+               src = ioremap_nocache(_mali_page_node_get_phy_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);\r
+               memcpy(dst, src, _MALI_OSK_MALI_PAGE_SIZE);\r
+               iounmap(src);\r
+       }\r
+\r
+       kunmap_atomic(dst);\r
+       dma_map_page(&mali_platform_device->dev, new_page,\r
+                    0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);\r
+\r
+}\r
+\r
+\r
+/*\r
+* Allocate a page on demand when the CPU accesses it.\r
+* This is used in the page fault handler.\r
+*/\r
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)\r
+{\r
+       struct page *new_page = NULL;\r
+       int i = 0;\r
+       struct mali_page_node *m_page, *found_node = NULL;\r
+       struct  mali_session_data *session = NULL;\r
+       mali_mem_cow *cow = &mem_bkend->cow_mem;\r
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
+       MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);\r
+       MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));\r
+\r
+       /* allocate new page here */\r
+       new_page = mali_mem_cow_alloc_page();\r
+       if (!new_page)\r
+               return _MALI_OSK_ERR_NOMEM;\r
+\r
+       /* find the page in backend*/\r
+       list_for_each_entry(m_page, &cow->pages, list) {\r
+               if (i == offset_page) {\r
+                       found_node = m_page;\r
+                       break;\r
+               }\r
+               i++;\r
+       }\r
+       MALI_DEBUG_ASSERT(found_node);\r
+       if (NULL == found_node) {\r
+               __free_page(new_page);\r
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;\r
+       }\r
+       /* Copy the src page's content to new page */\r
+       _mali_mem_cow_copy_page(found_node, new_page);\r
+\r
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);\r
+       session = mem_bkend->mali_allocation->session;\r
+       MALI_DEBUG_ASSERT_POINTER(session);\r
+       if (1 != _mali_page_node_get_ref_count(found_node)) {\r
+               atomic_add(1, &session->mali_mem_allocated_pages);\r
+               if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {\r
+                       session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;\r
+               }\r
+               mem_bkend->cow_mem.change_pages_nr++;\r
+       }\r
+       if (_mali_mem_put_page_node(found_node)) {\r
+               __free_page(new_page);\r
+               return _MALI_OSK_ERR_NOMEM;\r
+       }\r
+       /* always use OS for COW*/\r
+       found_node->type = MALI_PAGE_NODE_OS;\r
+       _mali_page_node_add_page(found_node, new_page);\r
+       /* map to GPU side*/\r
+\r
+       _mali_osk_mutex_wait(session->memory_lock);\r
+       mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);\r
+       _mali_osk_mutex_signal(session->memory_lock);\r
+       return _MALI_OSK_ERR_OK;\r
+}\r
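
Stripped of driver plumbing, the fault path is the classic COW step: copy the shared page into a private one, drop the shared reference, install the copy. A userspace model with plain ints standing in for page contents (all names hypothetical):

```c
/* Userspace model of the on-demand COW step: copy the shared page into a
 * private one, drop the shared reference, install the copy. Plain ints
 * stand in for page contents; all names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct page { int data; int ref; };

static struct page *cow_fault(struct page *old)
{
	struct page *copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;
	copy->data = old->data;  /* the page copy */
	copy->ref = 1;
	old->ref--;              /* unref the shared original */
	return copy;             /* caller installs this page and remaps the GPU */
}

int main(void)
{
	struct page shared = { .data = 42, .ref = 2 };
	struct page *priv = cow_fault(&shared);
	if (!priv)
		return 1;
	priv->data = 7;  /* write no longer visible through the shared page */
	printf("shared=%d private=%d shared_ref=%d\n",
	       shared.data, priv->data, shared.ref); /* 42 7 1 */
	free(priv);
	return 0;
}
```
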
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_cow.h
new file mode 100644 (file)
index 0000000..0a1c8f3
--- /dev/null
@@ -0,0 +1,46 @@
+/*\r
+ * This confidential and proprietary software may be used only as\r
+ * authorised by a licensing agreement from ARM Limited\r
+ * (C) COPYRIGHT 2013-2015 ARM Limited\r
+ * ALL RIGHTS RESERVED\r
+ * The entire notice above must be reproduced on all authorised\r
+ * copies and copies may only be made to the extent permitted\r
+ * by a licensing agreement from ARM Limited.\r
+ */\r
+\r
+#ifndef __MALI_MEMORY_COW_H__\r
+#define __MALI_MEMORY_COW_H__\r
+\r
+#include "mali_osk.h"\r
+#include "mali_session.h"\r
+#include "mali_memory_types.h"\r
+\r
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);\r
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,\r
+               struct vm_area_struct *vma,\r
+               unsigned long vaddr,\r
+               int num);\r
+\r
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,\r
+                                      u32 target_offset,\r
+                                      u32 target_size,\r
+                                      mali_mem_backend *backend,\r
+                                      u32 range_start,\r
+                                      u32 range_size);\r
+\r
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size);\r
+\r
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,\r
+               u32 target_offset,\r
+               u32 target_size,\r
+               mali_mem_backend *backend,\r
+               u32 range_start,\r
+               u32 range_size);\r
+\r
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size);\r
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);\r
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page);\r
+#endif\r
+\r
index 960531279f384be81880e89c70a8f5177a37566a..1c48d44f4bb905963c760ffac6e8ef14137490de 100644 (file)
@@ -1,15 +1,15 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
 
-#include <linux/fs.h>     /* file system operations */
-#include <asm/uaccess.h>       /* user space access */
+#include <linux/fs.h>      /* file system operations */
+#include <asm/uaccess.h>        /* user space access */
 #include <linux/dma-buf.h>
 #include <linux/scatterlist.h>
 #include <linux/rbtree.h>
 
 #include "mali_memory.h"
 #include "mali_memory_dma_buf.h"
-
+#include "mali_memory_virtual.h"
 #include "mali_pp_job.h"
 
-static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem);
-
-struct mali_dma_buf_attachment {
-       struct dma_buf *buf;
-       struct dma_buf_attachment *attachment;
-       struct sg_table *sgt;
-       struct mali_session_data *session;
-       int map_ref;
-       struct mutex map_lock;
-       mali_bool is_mapped;
-       wait_queue_head_t wait_queue;
-};
-
-static void mali_dma_buf_release(struct mali_dma_buf_attachment *mem)
-{
-       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
-
-       MALI_DEBUG_ASSERT_POINTER(mem);
-       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
-       MALI_DEBUG_ASSERT_POINTER(mem->buf);
-
-#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-       /* We mapped implicitly on attach, so we need to unmap on release */
-       mali_dma_buf_unmap(mem);
-#endif
-
-       /* Wait for buffer to become unmapped */
-       wait_event(mem->wait_queue, !mem->is_mapped);
-       MALI_DEBUG_ASSERT(!mem->is_mapped);
-
-       dma_buf_detach(mem->buf, mem->attachment);
-       dma_buf_put(mem->buf);
-
-       _mali_osk_free(mem);
-}
-
-void mali_mem_dma_buf_release(mali_mem_allocation *descriptor)
-{
-       struct mali_dma_buf_attachment *mem = descriptor->dma_buf.attachment;
-
-       mali_dma_buf_release(mem);
-}
-
 /*
  * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
  */
-static int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 flags)
+static int mali_dma_buf_map(mali_mem_backend *mem_backend)
 {
+       mali_mem_allocation *alloc;
+       struct mali_dma_buf_attachment *mem;
+       struct  mali_session_data *session;
        struct mali_page_directory *pagedir;
+       _mali_osk_errcode_t err;
        struct scatterlist *sg;
+       u32 virt, flags;
        int i;
 
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       mem = mem_backend->dma_buf.attachment;
        MALI_DEBUG_ASSERT_POINTER(mem);
+
+       session = alloc->session;
        MALI_DEBUG_ASSERT_POINTER(session);
        MALI_DEBUG_ASSERT(mem->session == session);
 
-       mutex_lock(&mem->map_lock);
+       virt = alloc->mali_vma_node.vm_node.start;
+       flags = alloc->flags;
 
+       mali_session_memory_lock(session);
        mem->map_ref++;
 
        MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
 
        if (1 == mem->map_ref) {
+
                /* First reference taken, so we need to map the dma buf */
                MALI_DEBUG_ASSERT(!mem->is_mapped);
 
-               pagedir = mali_session_get_page_directory(session);
-               MALI_DEBUG_ASSERT_POINTER(pagedir);
-
                mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
                if (IS_ERR_OR_NULL(mem->sgt)) {
                        MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+                       mem->map_ref--;
+                       mali_session_memory_unlock(session);
                        return -EFAULT;
                }
 
+               err = mali_mem_mali_map_prepare(alloc);
+               if (_MALI_OSK_ERR_OK != err) {
+                       MALI_DEBUG_PRINT(1, ("Mapping of DMA memory failed\n"));
+                       mem->map_ref--;
+                       mali_session_memory_unlock(session);
+                       return -ENOMEM;
+               }
+
+               pagedir = mali_session_get_page_directory(session);
+               MALI_DEBUG_ASSERT_POINTER(pagedir);
+
                for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
                        u32 size = sg_dma_len(sg);
                        dma_addr_t phys = sg_dma_address(sg);
 
                        /* sg must be page aligned. */
                        MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+                       MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
 
                        mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
 
@@ -125,38 +109,39 @@ static int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_ses
                }
 
                mem->is_mapped = MALI_TRUE;
-               mutex_unlock(&mem->map_lock);
-
+               mali_session_memory_unlock(session);
                /* Wake up any thread waiting for buffer to become mapped */
                wake_up_all(&mem->wait_queue);
        } else {
                MALI_DEBUG_ASSERT(mem->is_mapped);
-               mutex_unlock(&mem->map_lock);
+               mali_session_memory_unlock(session);
        }
 
        return 0;
 }
 
-static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
+static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem)
 {
+       MALI_DEBUG_ASSERT_POINTER(alloc);
        MALI_DEBUG_ASSERT_POINTER(mem);
        MALI_DEBUG_ASSERT_POINTER(mem->attachment);
        MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       MALI_DEBUG_ASSERT_POINTER(alloc->session);
 
-       mutex_lock(&mem->map_lock);
-
+       mali_session_memory_lock(alloc->session);
        mem->map_ref--;
 
        MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
 
        if (0 == mem->map_ref) {
                dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
-
+               if (MALI_TRUE == mem->is_mapped) {
+                       mali_mem_mali_map_free(alloc->session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                                              alloc->flags);
+               }
                mem->is_mapped = MALI_FALSE;
        }
-
-       mutex_unlock(&mem->map_lock);
-
+       mali_session_memory_unlock(alloc->session);
        /* Wake up any thread waiting for buffer to become unmapped */
        wake_up_all(&mem->wait_queue);
 }
@@ -164,100 +149,110 @@ static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
 #if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
 int mali_dma_buf_map_job(struct mali_pp_job *job)
 {
-       mali_mem_allocation *descriptor;
        struct mali_dma_buf_attachment *mem;
        _mali_osk_errcode_t err;
        int i;
        int ret = 0;
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
 
-       _mali_osk_mutex_wait(job->session->memory_lock);
-
-       for (i = 0; i < job->num_memory_cookies; i++) {
-               int cookie = job->memory_cookies[i];
+       MALI_DEBUG_ASSERT_POINTER(job);
 
-               if (0 == cookie) {
-                       /* 0 is not a valid cookie */
-                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
-                       continue;
-               }
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
 
-               MALI_DEBUG_ASSERT(0 < cookie);
+       session = mali_pp_job_get_session(job);
 
-               err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
-                                                 cookie, (void**)&descriptor);
+       MALI_DEBUG_ASSERT_POINTER(session);
 
-               if (_MALI_OSK_ERR_OK != err) {
-                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
-                       ret = -EFAULT;
-                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+       for (i = 0; i < num_memory_cookies; i++) {
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+               if (MALI_MEM_DMA_BUF != mali_alloc->type) {
                        continue;
                }
 
-               if (MALI_MEM_DMA_BUF != descriptor->type) {
-                       /* Not a DMA-buf */
-                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
-                       continue;
-               }
+               /* Look up the backend memory */
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
 
-               mem = descriptor->dma_buf.attachment;
+               mem = mem_bkend->dma_buf.attachment;
 
                MALI_DEBUG_ASSERT_POINTER(mem);
-               MALI_DEBUG_ASSERT(mem->session == job->session);
+               MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
 
-               err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
+               err = mali_dma_buf_map(mem_bkend);
                if (0 != err) {
-                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\b",
-                                               cookie, descriptor->mali_mapping.addr));
+                       MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for mali address %x\n", mali_addr));
                        ret = -EFAULT;
-                       MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
                        continue;
                }
-
-               /* Add mem to list of DMA-bufs mapped for this job */
-               job->dma_bufs[i] = mem;
        }
-
-       _mali_osk_mutex_signal(job->session->memory_lock);
-
        return ret;
 }
 
 void mali_dma_buf_unmap_job(struct mali_pp_job *job)
 {
+       struct mali_dma_buf_attachment *mem;
        int i;
-       for (i = 0; i < job->num_dma_bufs; i++) {
-               if (NULL == job->dma_bufs[i]) continue;
+       u32 num_memory_cookies;
+       struct mali_session_data *session;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(job);
 
-               mali_dma_buf_unmap(job->dma_bufs[i]);
-               job->dma_bufs[i] = NULL;
+       num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+       session = mali_pp_job_get_session(job);
+
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       for (i = 0; i < num_memory_cookies; i++) {
+               u32 mali_addr  = mali_pp_job_get_memory_cookie(job, i);
+               mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+               MALI_DEBUG_ASSERT(NULL != mali_alloc);
+               if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+                       continue;
+               }
+
+               /* Look up the backend memory */
+               mutex_lock(&mali_idr_mutex);
+               mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+               mutex_unlock(&mali_idr_mutex);
+               MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+               mem = mem_bkend->dma_buf.attachment;
+
+               MALI_DEBUG_ASSERT_POINTER(mem);
+               MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+               mali_dma_buf_unmap(mem_bkend->mali_allocation, mem);
        }
 }
 #endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
 
-int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg)
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
 {
-       struct dma_buf *buf;
-       struct mali_dma_buf_attachment *mem;
-       _mali_uk_attach_dma_buf_s args;
-       mali_mem_allocation *descriptor;
-       int md;
+       _mali_uk_dma_buf_get_size_s args;
        int fd;
+       struct dma_buf *buf;
 
-       /* Get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s))) {
+       /* Get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+       if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
                return -EFAULT;
        }
 
-       if (args.mali_address & ~PAGE_MASK) {
-               MALI_DEBUG_PRINT_ERROR(("Requested address (0x%08x) is not page aligned\n", args.mali_address));
-               return -EINVAL;
-       }
-
-       if (args.mali_address >= args.mali_address + args.size) {
-               MALI_DEBUG_PRINT_ERROR(("Requested address and size (0x%08x + 0x%08x) is too big\n", args.mali_address, args.size));
-               return -EINVAL;
-       }
-
+       /* Look up the dma-buf behind the fd */
        fd = args.mem_fd;
 
        buf = dma_buf_get(fd);
@@ -266,169 +261,109 @@ int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_b
                return PTR_RET(buf);
        }
 
-       /* Currently, mapping of the full buffer are supported. */
-       if (args.size != buf->size) {
-               MALI_DEBUG_PRINT_ERROR(("dma-buf size doesn't match mapping size.\n"));
-               dma_buf_put(buf);
-               return -EINVAL;
-       }
-
-       mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
-       if (NULL == mem) {
-               MALI_DEBUG_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
+       if (0 != put_user(buf->size, &user_arg->size)) {
                dma_buf_put(buf);
-               return -ENOMEM;
-       }
-
-       mem->buf = buf;
-       mem->session = session;
-       mem->map_ref = 0;
-       mutex_init(&mem->map_lock);
-       init_waitqueue_head(&mem->wait_queue);
-
-       mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
-       if (NULL == mem->attachment) {
-               MALI_DEBUG_PRINT_ERROR(("Failed to attach to dma-buf %d\n", fd));
-               dma_buf_put(mem->buf);
-               _mali_osk_free(mem);
                return -EFAULT;
        }
 
-       /* Set up Mali memory descriptor */
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_DMA_BUF);
-       if (NULL == descriptor) {
-               MALI_DEBUG_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
-               mali_dma_buf_release(mem);
-               return -ENOMEM;
-       }
-
-       descriptor->size = args.size;
-       descriptor->mali_mapping.addr = args.mali_address;
+       dma_buf_put(buf);
 
-       descriptor->dma_buf.attachment = mem;
+       return 0;
+}
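
For completeness, userspace does not strictly need this ioctl on newer kernels: the dma-buf file implements `llseek`, so `lseek(fd, 0, SEEK_END)` also yields the buffer size. A hedged sketch, assuming the fd really is a dma-buf and the running kernel supports dma-buf llseek:

```c
/* Query a dma-buf's size from userspace via llseek (supported by the
 * mainline dma-buf fops). Assumes argv[1] carries an inherited dma-buf fd. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static long dmabuf_size(int fd)
{
	off_t end = lseek(fd, 0, SEEK_END);  /* dma-buf size, or -1 on error */
	lseek(fd, 0, SEEK_SET);              /* restore the file position */
	return (long)end;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("size = %ld\n", dmabuf_size(atoi(argv[1])));
	return 0;
}
```
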
 
-       descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
-       if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
-               descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
-       }
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               int fd, u32 flags)
+{
+       struct dma_buf *buf;
+       struct mali_dma_buf_attachment *dma_mem;
+       struct  mali_session_data *session = alloc->session;
 
-       _mali_osk_mutex_wait(session->memory_lock);
+       MALI_DEBUG_ASSERT_POINTER(session);
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT_POINTER(alloc);
 
-       /* Map dma-buf into this session's page tables */
-       if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
-               _mali_osk_mutex_signal(session->memory_lock);
-               MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
-               mali_mem_descriptor_destroy(descriptor);
-               mali_dma_buf_release(mem);
-               return -ENOMEM;
+       /* get dma buffer */
+       buf = dma_buf_get(fd);
+       if (IS_ERR_OR_NULL(buf)) {
+               return _MALI_OSK_ERR_FAULT;
        }
 
-#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-       /* Map memory into session's Mali virtual address space. */
-
-       if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
-               mali_mem_mali_map_free(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
-
-               MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
-               mali_mem_descriptor_destroy(descriptor);
-               mali_dma_buf_release(mem);
-               return -ENOMEM;
+       /* Currently, only mapping of the full buffer is supported. */
+       if (alloc->psize != buf->size) {
+               goto failed_alloc_mem;
        }
 
-#endif
-
-       _mali_osk_mutex_signal(session->memory_lock);
-
-       /* Get descriptor mapping for memory. */
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_mali_map_free(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
-
-               MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
-               mali_mem_descriptor_destroy(descriptor);
-               mali_dma_buf_release(mem);
-               return -EFAULT;
+       dma_mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+       if (NULL == dma_mem) {
+               goto failed_alloc_mem;
        }
 
-       /* Return stuff to user space */
-       if (0 != put_user(md, &user_arg->cookie)) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_mali_map_free(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
+       dma_mem->buf = buf;
+       dma_mem->session = session;
+       dma_mem->map_ref = 0;
+       init_waitqueue_head(&dma_mem->wait_queue);
 
-               MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
-               mali_descriptor_mapping_free(session->descriptor_mapping, md);
-               mali_dma_buf_release(mem);
-               return -EFAULT;
+       dma_mem->attachment = dma_buf_attach(dma_mem->buf, &mali_platform_device->dev);
+       if (NULL == dma_mem->attachment) {
+               goto failed_dma_attach;
        }
 
-       return 0;
-}
+       mem_backend->dma_buf.attachment = dma_mem;
 
-int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg)
-{
-       int ret = 0;
-       _mali_uk_release_dma_buf_s args;
-       mali_mem_allocation *descriptor;
-
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s)) ) {
-               return -EFAULT;
+       alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
        }
 
-       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));
-
-       _mali_osk_mutex_wait(session->memory_lock);
-
-       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);
 
-       if (NULL != descriptor) {
-               MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));
-
-               mali_mem_mali_map_free(descriptor);
-
-               mali_dma_buf_release(descriptor->dma_buf.attachment);
-
-               mali_mem_descriptor_destroy(descriptor);
-       } else {
-               MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
-               ret = -EINVAL;
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       /* Map memory into session's Mali virtual address space. */
+       if (0 != mali_dma_buf_map(mem_backend)) {
+               goto Failed_dma_map;
        }
+#endif
 
-       _mali_osk_mutex_signal(session->memory_lock);
+       return _MALI_OSK_ERR_OK;
 
-       /* Return the error that _mali_ukk_map_external_ump_mem produced */
-       return ret;
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+Failed_dma_map:
+       mali_dma_buf_unmap(alloc, dma_mem);
+#endif
+       /* Wait for buffer to become unmapped */
+       wait_event(dma_mem->wait_queue, !dma_mem->is_mapped);
+       MALI_DEBUG_ASSERT(!dma_mem->is_mapped);
+       dma_buf_detach(dma_mem->buf, dma_mem->attachment);
+failed_dma_attach:
+       _mali_osk_free(dma_mem);
+failed_alloc_mem:
+       dma_buf_put(buf);
+       return _MALI_OSK_ERR_FAULT;
 }
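+
+/*
+ * Error-unwind summary for mali_mem_bind_dma_buf(): each label releases what
+ * was acquired before the failing step, in reverse order of acquisition:
+ *
+ *   Failed_dma_map:    mali_dma_buf_unmap(), wait until !is_mapped, detach
+ *   failed_dma_attach: _mali_osk_free(dma_mem)
+ *   failed_alloc_mem:  dma_buf_put(buf)
+ */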
 
-int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend)
 {
-       _mali_uk_dma_buf_get_size_s args;
-       int fd;
-       struct dma_buf *buf;
-
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s)) ) {
-               return -EFAULT;
-       }
-
-       /* Do DMA-BUF stuff */
-       fd = args.mem_fd;
+       struct mali_dma_buf_attachment *mem;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type);
 
-       buf = dma_buf_get(fd);
-       if (IS_ERR_OR_NULL(buf)) {
-               MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
-               return PTR_RET(buf);
-       }
+       mem = mem_backend->dma_buf.attachment;
+       MALI_DEBUG_ASSERT_POINTER(mem);
+       MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+       MALI_DEBUG_ASSERT_POINTER(mem->buf);
+       MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
 
-       if (0 != put_user(buf->size, &user_arg->size)) {
-               dma_buf_put(buf);
-               return -EFAULT;
-       }
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+       MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation);
+       /* We mapped implicitly on attach, so we need to unmap on release */
+       mali_dma_buf_unmap(mem_backend->mali_allocation, mem);
+#endif
+       /* Wait for buffer to become unmapped */
+       wait_event(mem->wait_queue, !mem->is_mapped);
+       MALI_DEBUG_ASSERT(!mem->is_mapped);
 
-       dma_buf_put(buf);
+       dma_buf_detach(mem->buf, mem->attachment);
+       dma_buf_put(mem->buf);
 
-       return 0;
+       _mali_osk_free(mem);
 }
index c34edd8936b3418fff82c3e3eb9ea9d43e995566..27bb05a3f951b55416a328c14e7e44096fbf1a71 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 extern "C" {
 #endif
 
+#include "mali_uk_types.h"
 #include "mali_osk.h"
 #include "mali_memory.h"
 
 struct mali_pp_job;
 
 struct mali_dma_buf_attachment;
+struct mali_dma_buf_attachment {
+       struct dma_buf *buf;
+       struct dma_buf_attachment *attachment;
+       struct sg_table *sgt;
+       struct mali_session_data *session;
+       int map_ref;
+       struct mutex map_lock;
+       mali_bool is_mapped;
+       wait_queue_head_t wait_queue;
+};
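+
+/*
+ * Field roles, as used by mali_memory_dma_buf.c: buf/attachment pair the
+ * dma-buf with the Mali platform device, sgt caches the mapped scatter
+ * list, map_ref counts nested map requests under map_lock, and wait_queue
+ * lets the unbind path sleep until is_mapped goes false.
+ */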
 
-int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *arg);
-int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *arg);
 int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
 
-void mali_mem_dma_buf_release(mali_mem_allocation *descriptor);
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend);
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               int fd, u32 flags);
 
 #if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
 int mali_dma_buf_map_job(struct mali_pp_job *job);
index 0f09eedaffff7f9ad4aba96c242242a99a3ffcca..41b24e0a9d1eb7535ae68a9b8df9384b6d1f9878 100644 (file)
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
 
+#include "mali_kernel_common.h"
 #include "mali_osk.h"
+#include "mali_ukk.h"
 #include "mali_memory.h"
-#include "mali_kernel_descriptor_mapping.h"
 #include "mali_mem_validation.h"
 #include "mali_uk_types.h"
 
-void mali_mem_external_release(mali_mem_allocation *descriptor)
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend)
 {
-       MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == descriptor->type);
-
-       mali_mem_mali_map_free(descriptor);
+       mali_mem_allocation *alloc;
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);
+
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
 }
 
-_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               u32 phys_addr,
+               u32 flag)
 {
        struct mali_session_data *session;
-       mali_mem_allocation * descriptor;
-       int md;
        _mali_osk_errcode_t err;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session = (struct mali_session_data *)args->ctx;
+       u32 virt, phys, size;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       size = alloc->psize;
+       session = (struct mali_session_data *)(uintptr_t)alloc->session;
        MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
 
        /* check arguments */
        /* NULL might be a valid Mali address */
-       if (! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+       if (!size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 
        /* size must be a multiple of the system page size */
-       if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
-
-       MALI_DEBUG_PRINT(3,
-                        ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
-                         (void*)args->phys_addr,
-                         (void*)(args->phys_addr + args->size -1),
-                         (void*)args->mali_address)
-                       );
+       if (size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 
        /* Validate the mali physical range */
-       if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
+       if (_MALI_OSK_ERR_OK != mali_mem_validation_check(phys_addr, size)) {
                return _MALI_OSK_ERR_FAULT;
        }
 
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
-       if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
-
-       descriptor->mali_mapping.addr = args->mali_address;
-       descriptor->size = args->size;
-
-       if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
-               descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
+       if (flag & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
        }
 
-       _mali_osk_mutex_wait(session->memory_lock);
-       {
-               u32 virt = descriptor->mali_mapping.addr;
-               u32 phys = args->phys_addr;
-               u32 size = args->size;
-
-               err = mali_mem_mali_map_prepare(descriptor);
-               if (_MALI_OSK_ERR_OK != err) {
-                       _mali_osk_mutex_signal(session->memory_lock);
-                       mali_mem_descriptor_destroy(descriptor);
-                       return _MALI_OSK_ERR_NOMEM;
-               }
-
-               mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+       mali_session_memory_lock(session);
 
-               if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
-                       mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
-               }
-       }
-       _mali_osk_mutex_signal(session->memory_lock);
+       virt = alloc->mali_vma_node.vm_node.start;
+       phys = phys_addr;
 
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_external_release(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
-               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+       err = mali_mem_mali_map_prepare(alloc);
+       if (_MALI_OSK_ERR_OK != err) {
+               mali_session_memory_unlock(session);
+               return _MALI_OSK_ERR_NOMEM;
        }
 
-       args->cookie = md;
+       mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
 
-       MALI_SUCCESS;
-}
-
-_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
-{
-       mali_mem_allocation * descriptor;
-       void* old_value;
-       struct mali_session_data *session;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session = (struct mali_session_data *)args->ctx;
-       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
-
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void**)&descriptor)) {
-               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
-               MALI_ERROR(_MALI_OSK_ERR_FAULT);
-       }
-
-       old_value = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
-
-       if (NULL != old_value) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_external_release(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
-               mali_mem_descriptor_destroy(descriptor);
+       if (alloc->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+               mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
        }
+       MALI_DEBUG_PRINT(3,
+                        ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+                         phys_addr, (phys_addr + size - 1),
+                         virt));
+       mali_session_memory_unlock(session);
 
        MALI_SUCCESS;
 }
+
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_external.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_external.h
new file mode 100644 (file)
index 0000000..da18d85
--- /dev/null
@@ -0,0 +1,29 @@
+
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_MEMORY_EXTERNAL_H__
+#define __MALI_MEMORY_EXTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+               mali_mem_backend *mem_backend,
+               u32 phys_addr,
+               u32 flag);
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.c
new file mode 100644 (file)
index 0000000..f4bc6f0
--- /dev/null
@@ -0,0 +1,701 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include <linux/platform_device.h>
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_util.h"
+#include "mali_memory_external.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_ukk.h"
+
+#define MALI_S32_MAX 0x7fffffff
+
+/*
+ * New memory system interface
+ */
+
+/* init idr for backend memory */
+struct idr mali_backend_idr;
+struct mutex mali_idr_mutex;
+
+/* init allocation manager */
+int mali_memory_manager_init(struct mali_allocation_manager *mgr)
+{
+       /* init Locks */
+       rwlock_init(&mgr->vm_lock);
+       mutex_init(&mgr->list_mutex);
+
+       /* init link */
+       INIT_LIST_HEAD(&mgr->head);
+
+       /* init RB tree */
+       mgr->allocation_mgr_rb = RB_ROOT;
+       return 0;
+}
+
+/* Deinit allocation manager.
+ * Debug builds verify that no allocations remain.
+ */
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
+{
+       /* check RB tree is empty */
+       MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
+       /* check allocation List */
+       MALI_DEBUG_ASSERT(list_empty(&mgr->head));
+}
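+
+/*
+ * Usage sketch (illustrative): one manager is embedded in each session and
+ * must be empty before teardown.
+ *
+ *   struct mali_allocation_manager mgr;
+ *
+ *   mali_memory_manager_init(&mgr);
+ *   ...allocations come and go...
+ *   mali_memory_manager_uninit(&mgr); // debug-asserts tree and list empty
+ */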
+
+/* Create and initialize a mali memory allocation struct */
+static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
+{
+       mali_mem_allocation *mali_allocation;
+
+       /* Allocate memory */
+       mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
+       if (NULL == mali_allocation) {
+               MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
+               return NULL;
+       }
+
+       MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+       /* do init */
+       mali_allocation->flags = 0;
+       mali_allocation->session = session;
+
+       INIT_LIST_HEAD(&mali_allocation->list);
+       _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
+
+       /* add to the session's allocation list */
+       mutex_lock(&session->allocation_mgr.list_mutex);
+       list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
+       mutex_unlock(&session->allocation_mgr.list_mutex);
+
+       return mali_allocation;
+}
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
+{
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT_POINTER(alloc->session);
+       mutex_lock(&alloc->session->allocation_mgr.list_mutex);
+       list_del(&alloc->list);
+       mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
+
+       kfree(alloc);
+}
+
+int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
+{
+       mali_mem_backend *mem_backend = NULL;
+       s32 ret = -ENOSPC;
+       s32 index = -1;
+       *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
+       if (NULL == *backend) {
+               MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
+               return -1;
+       }
+       mem_backend = *backend;
+       mem_backend->size = psize;
+       mutex_init(&mem_backend->mutex);
+
+       /* link backend with id */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+again:
+       if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
+               kfree(mem_backend);
+               return -ENOMEM;
+       }
+       mutex_lock(&mali_idr_mutex);
+       ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
+       mutex_unlock(&mali_idr_mutex);
+
+       if (-ENOSPC == ret) {
+               kfree(mem_backend);
+               return -ENOSPC;
+       }
+       if (-EAGAIN == ret)
+               goto again;
+#else
+       mutex_lock(&mali_idr_mutex);
+       ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
+       mutex_unlock(&mali_idr_mutex);
+       index = ret;
+       if (ret < 0) {
+               MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
+               kfree(mem_backend);
+               return -ENOSPC;
+       }
+#endif
+       return index;
+}
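+
+/*
+ * Usage sketch (illustrative): the return value is the idr handle that gets
+ * stored in mali_allocation->backend_handle; a negative value means failure.
+ *
+ *   mali_mem_backend *backend = NULL;
+ *   s32 handle = mali_mem_backend_struct_create(&backend, psize);
+ *   if (handle < 0)
+ *           return _MALI_OSK_ERR_NOMEM;
+ *   backend->mali_allocation = alloc; // link backend and allocation
+ */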
+
+
+static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
+{
+       mali_mem_backend *mem_backend = *backend;
+
+       mutex_lock(&mali_idr_mutex);
+       idr_remove(&mali_backend_idr, backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       kfree(mem_backend);
+       *backend = NULL;
+}
+
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_allocation_manager *mgr, u32 mali_address)
+{
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_backend *mem_bkend = NULL;
+       mali_mem_allocation *mali_alloc = NULL;
+       MALI_DEBUG_ASSERT_POINTER(mgr);
+       mali_vma_node = mali_vma_offset_search(mgr, mali_address, 0);
+       if (NULL == mali_vma_node)  {
+               MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search: vma node was NULL\n"));
+               return NULL;
+       }
+       mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+       /* Look up the backend memory by its idr handle */
+       mutex_lock(&mali_idr_mutex);
+       mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(NULL != mem_bkend);
+       return mem_bkend;
+}
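+
+/*
+ * Lookup sketch (illustrative): a GPU virtual address resolves to a backend
+ * in two steps, vma node -> allocation -> idr handle -> backend.
+ *
+ *   mali_mem_backend *bkend =
+ *           mali_mem_backend_struct_search(&session->allocation_mgr, vaddr);
+ *   if (NULL != bkend)
+ *           ...bkend->type selects the physical backend handling...
+ */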
+
+/* Set GPU MMU properties */
+static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
+{
+       if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
+               *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+       } else {
+               *properties = MALI_MMU_FLAGS_DEFAULT;
+       }
+}
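+
+/*
+ * Example (illustrative): translating userspace allocation flags into MMU
+ * mapping properties.
+ *
+ *   u32 props;
+ *   _mali_memory_gpu_map_property_set(&props, _MALI_MEMORY_GPU_READ_ALLOCATE);
+ *   // props == MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE; any other flags
+ *   // value yields MALI_MMU_FLAGS_DEFAULT
+ */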
+
+
+/**
+ * _mali_ukk_mem_allocate - allocate mali memory
+ */
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
+{
+       struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       int retval = 0;
+       mali_mem_allocation *mali_allocation = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+
+       MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size=0x%x! \n", args->gpu_vaddr, args->psize));
+
+       /* Check if the address is already allocated */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
+
+       if (unlikely(mali_vma_node)) {
+               MALI_DEBUG_ASSERT(0);
+               return _MALI_OSK_ERR_FAULT;
+       }
+       /* create the mali memory allocation */
+
+       mali_allocation = mali_mem_allocation_struct_create(session);
+
+       if (mali_allocation == NULL) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->psize;
+       mali_allocation->vsize = args->vsize;
+
+       /* check if we have dedicated memory */
+       if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+               mali_allocation->type = MALI_MEM_BLOCK;
+       } else {
+               mali_allocation->type = MALI_MEM_OS;
+       }
+
+       /* add the allocation node to the RB tree for indexing */
+       mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = args->vsize;
+
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+       /* check if we need to allocate a backend */
+       if (mali_allocation->psize == 0)
+               return _MALI_OSK_ERR_OK;
+
+       /* allocate the physical backend & pages */
+       if (likely(mali_allocation->psize > 0)) {
+               mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
+               if (mali_allocation->backend_handle < 0) {
+                       ret = _MALI_OSK_ERR_NOMEM;
+                       MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+                       goto failed_alloc_backend;
+               }
+
+               mem_backend->mali_allocation = mali_allocation;
+               mem_backend->type = mali_allocation->type;
+
+               if (mem_backend->type == MALI_MEM_OS) {
+                       retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+               } else if (mem_backend->type == MALI_MEM_BLOCK) {
+                       /* try to allocate from BLOCK memory first, then fall back to OS memory on failure */
+                       if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
+                               retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+                               mem_backend->type = MALI_MEM_OS;
+                               mali_allocation->type = MALI_MEM_OS;
+                       }
+               } else {
+                       /* ONLY support mem_os type */
+                       MALI_DEBUG_ASSERT(0);
+               }
+
+               if (retval) {
+                       ret = _MALI_OSK_ERR_NOMEM;
+                       MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
+                       goto failed_alloc_pages;
+               }
+       }
+
+       /* map to the GPU side */
+       mali_allocation->mali_mapping.addr = args->gpu_vaddr;
+
+       /* set GPU MMU property */
+       _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+
+       if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+               _mali_osk_mutex_wait(session->memory_lock);
+               /* Map on Mali */
+               ret = mali_mem_mali_map_prepare(mali_allocation);
+               if (0 != ret) {
+                       MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
+                       goto failed_gpu_map;
+               }
+
+               if (mem_backend->type == MALI_MEM_OS) {
+                       mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
+                                            mali_allocation->mali_mapping.properties);
+               } else if (mem_backend->type == MALI_MEM_BLOCK) {
+                       mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
+                                               mali_allocation->mali_mapping.properties);
+               } else { /* unsupported type */
+                       MALI_DEBUG_ASSERT(0);
+               }
+
+               _mali_osk_mutex_signal(session->memory_lock);
+       }
+
+       if (MALI_MEM_OS == mem_backend->type) {
+               atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
+       } else {
+               MALI_DEBUG_ASSERT(MALI_MEM_BLOCK == mem_backend->type);
+               atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
+       }
+
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+       return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+       _mali_osk_mutex_signal(session->memory_lock);
+       if (mem_backend->type == MALI_MEM_OS) {
+               mali_mem_os_free(&mem_backend->os_mem.pages, mem_backend->os_mem.count, MALI_FALSE);
+       } else {
+               mali_mem_block_free(&mem_backend->block_mem);
+       }
+failed_alloc_pages:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       return ret;
+}
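+
+/*
+ * Allocation flow summary for _mali_ukk_mem_allocate(): vaddr collision
+ * check -> allocation struct -> vma node insert -> backend struct (idr) ->
+ * physical pages (BLOCK with OS fallback, or OS) -> optional GPU mapping ->
+ * per-session page accounting. Each failure label unwinds exactly the steps
+ * that completed before it.
+ */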
+
+
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       u32 vaddr = args->gpu_vaddr;
+       mali_mem_allocation *mali_alloc = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+
+       /* find the mali allocation structure by virtual address */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
+       MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+       mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+
+       if (mali_alloc)
+               /* check ref_count */
+               args->free_pages_nr = mali_allocation_unref(&mali_alloc);
+
+       return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+ * Function _mali_ukk_mem_bind -- bind external memory to a new GPU address
+ * It allocates a new mem allocation and binds the external memory to it.
+ * Supported backend types are:
+ * _MALI_MEMORY_BIND_BACKEND_UMP
+ * _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+ * _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+ * CPU access is not supported yet.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_backend *mem_backend = NULL;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_allocation *mali_allocation = NULL;
+       MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size=0x%x! \n", args->vaddr, args->size));
+
+       /* allocate the mali allocation */
+       mali_allocation = mali_mem_allocation_struct_create(session);
+
+       if (mali_allocation == NULL) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->size;
+       mali_allocation->vsize = args->size;
+       mali_allocation->mali_mapping.addr = args->vaddr;
+
+       /* add the allocation node to the RB tree for indexing */
+       mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = args->size;
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+       /* allocate backend */
+       if (mali_allocation->psize > 0) {
+               mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+               if (mali_allocation->backend_handle < 0) {
+                       goto Failed_alloc_backend;
+               }
+
+       } else {
+               goto Failed_alloc_backend;
+       }
+
+       mem_backend->size = mali_allocation->psize;
+       mem_backend->mali_allocation = mali_allocation;
+
+       switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
+       case  _MALI_MEMORY_BIND_BACKEND_UMP:
+#if defined(CONFIG_MALI400_UMP)
+               mali_allocation->type = MALI_MEM_UMP;
+               mem_backend->type = MALI_MEM_UMP;
+               ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
+                                           args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
+                       goto  Failed_bind_backend;
+               }
+#else
+               MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+               goto Failed_bind_backend;
+#endif
+               break;
+       case  _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               mali_allocation->type = MALI_MEM_DMA_BUF;
+               mem_backend->type = MALI_MEM_DMA_BUF;
+               ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
+                                           args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
+                       goto Failed_bind_backend;
+               }
+#else
+               MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+               goto Failed_bind_backend;
+#endif
+               break;
+       case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
+               /* not allowed */
+               MALI_DEBUG_ASSERT(0);
+               break;
+
+       case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
+               mali_allocation->type = MALI_MEM_EXTERNAL;
+               mem_backend->type = MALI_MEM_EXTERNAL;
+               ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
+                                           args->mem_union.bind_ext_memory.flags);
+               if (_MALI_OSK_ERR_OK != ret) {
+                       MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
+                       goto Failed_bind_backend;
+               }
+               break;
+
+       case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
+               /* not allowed */
+               MALI_DEBUG_ASSERT(0);
+               break;
+
+       default:
+               MALI_DEBUG_ASSERT(0);
+               break;
+       }
+       MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
+       atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
+       return _MALI_OSK_ERR_OK;
+
+Failed_bind_backend:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+
+Failed_alloc_backend:
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
+       return ret;
+}
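+
+/*
+ * Bind sketch (illustrative, hypothetical values): binding a physically
+ * contiguous external buffer at a chosen GPU virtual address.
+ *
+ *   _mali_uk_bind_mem_s args = {0};
+ *   args.ctx = (uintptr_t)session;            // session owning the mapping
+ *   args.vaddr = 0x10000000;                  // unused GPU virtual address
+ *   args.size = 16 * 4096;                    // page-aligned size
+ *   args.flags = _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY;
+ *   args.mem_union.bind_ext_memory.phys_addr = ext_phys;
+ *   args.mem_union.bind_ext_memory.flags = 0;
+ *   err = _mali_ukk_mem_bind(&args);
+ */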
+
+
+/*
+ * Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address
+ * This function unbinds the backend memory and frees the allocation;
+ * there is no ref_count for this type of memory.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
+{
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       mali_mem_allocation *mali_allocation = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+       u32 mali_addr = args->vaddr;
+       MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
+
+       /* find the allocation by vaddr */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+       if (likely(mali_vma_node)) {
+               MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+               mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+       } else {
+               MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+               return _MALI_OSK_ERR_INVALID_ARGS;
+       }
+
+       if (NULL != mali_allocation)
+               /* check ref_count */
+               mali_allocation_unref(&mali_allocation);
+       return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * Function _mali_ukk_mem_cow -- COW for an allocation
+ * This function allocates new pages for a range (range, range+size) of the
+ * allocation and maps them to a GPU vaddr, reusing the pages outside the
+ * range from the target allocation.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_backend *target_backend = NULL;
+       mali_mem_backend *mem_backend = NULL;
+       struct mali_vma_node *mali_vma_node = NULL;
+       mali_mem_allocation *mali_allocation = NULL;
+
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       /* Get the target backend for cow */
+       target_backend = mali_mem_backend_struct_search(&session->allocation_mgr, args->target_handle);
+
+       if (NULL == target_backend || 0 == target_backend->size) {
+               MALI_DEBUG_ASSERT_POINTER(target_backend);
+               MALI_DEBUG_ASSERT(0 != target_backend->size);
+               return ret;
+       }
+
+       /* Check if the new mali address is allocated */
+       mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
+
+       if (unlikely(mali_vma_node)) {
+               MALI_DEBUG_ASSERT(0);
+               return ret;
+       }
+
+       /* create a new allocation for COW */
+       mali_allocation = mali_mem_allocation_struct_create(session);
+       if (mali_allocation == NULL) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
+               return _MALI_OSK_ERR_NOMEM;
+       }
+       mali_allocation->psize = args->target_size;
+       mali_allocation->vsize = args->target_size;
+       mali_allocation->type = MALI_MEM_COW;
+
+       /* add the allocation node to the RB tree for indexing */
+       mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+       mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
+       mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+       /* create new backend for COW memory */
+       mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+       if (mali_allocation->backend_handle < 0) {
+               ret = _MALI_OSK_ERR_NOMEM;
+               MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+               goto failed_alloc_backend;
+       }
+       mem_backend->mali_allocation = mali_allocation;
+       mem_backend->type = mali_allocation->type;
+
+       /* Increase the target backend's COW count, allocate new pages for the
+        * COW backend from OS memory for the modified range, and keep (taking
+        * a reference on) the pages outside the modified range.
+        */
+       MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x;  cow_addr: 0x%x,  size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
+                            mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
+
+       ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
+       if (_MALI_OSK_ERR_OK != ret) {
+               MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
+               goto failed_do_cow;
+       }
+
+       /* map to the GPU side */
+       mali_allocation->mali_mapping.addr = args->vaddr;
+       /* set GPU MMU property */
+       _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+
+       _mali_osk_mutex_wait(session->memory_lock);
+       /* Map on Mali */
+       ret = mali_mem_mali_map_prepare(mali_allocation);
+       if (0 != ret) {
+               MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
+               goto failed_gpu_map;
+       }
+       mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
+
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       mutex_lock(&target_backend->mutex);
+       target_backend->cow_flag |= MALI_MEM_BACKEND_FLAG_COWED;
+       mutex_unlock(&target_backend->mutex);
+
+       atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+       return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+       _mali_osk_mutex_signal(session->memory_lock);
+       mali_mem_cow_release(mem_backend, MALI_FALSE);
+       mem_backend->cow_mem.count = 0;
+failed_do_cow:
+       mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_allocation);
+
+       return ret;
+}
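+
+/*
+ * COW flow summary for _mali_ukk_mem_cow(): look up the target backend by
+ * handle, insert a new vma node at args->vaddr, create a MALI_MEM_COW
+ * backend, let mali_memory_do_cow() copy pages inside
+ * [range_start, range_start + range_size) and re-reference the rest, map
+ * the result on the GPU, then flag the target backend
+ * MALI_MEM_BACKEND_FLAG_COWED so later writers know its pages are shared.
+ */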
+
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
+{
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+       mali_mem_backend *mem_backend = NULL;
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
+       /* Get the backend that needs to be modified. */
+       mem_backend = mali_mem_backend_struct_search(&session->allocation_mgr, args->vaddr);
+
+       if (NULL == mem_backend || 0 == mem_backend->size) {
+               MALI_DEBUG_ASSERT_POINTER(mem_backend);
+               MALI_DEBUG_ASSERT(0 != mem_backend->size);
+               return ret;
+       }
+
+       MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
+
+       ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
+       args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+       if (_MALI_OSK_ERR_OK != ret)
+               return ret;
+       _mali_osk_mutex_wait(session->memory_lock);
+       mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
+       _mali_osk_mutex_signal(session->memory_lock);
+
+       atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
+       if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+               session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+       }
+
+       return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
+{
+       args->memory_usage = _mali_ukk_report_memory_usage();
+       if (0 != args->vaddr) {
+               mali_mem_backend *mem_backend = NULL;
+               struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+               /* Get the backend that needs to be modified. */
+               mem_backend = mali_mem_backend_struct_search(&session->allocation_mgr, args->vaddr);
+               if (NULL == mem_backend) {
+                       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+
+               if (MALI_MEM_COW == mem_backend->type)
+                       args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+       }
+       return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * attach a backend to an existing mali allocation
+ */
+
+/**
+ * detach a backend from an existing mali allocation
+ */
+
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_manager.h
new file mode 100644 (file)
index 0000000..7b417f3
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_MEMORY_MANAGER_H__
+#define __MALI_MEMORY_MANAGER_H__
+
+#include "mali_osk.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_allocation_manager {
+       rwlock_t vm_lock;
+       struct rb_root allocation_mgr_rb;
+       struct list_head head;
+       struct mutex list_mutex;
+};
+
+extern struct idr mali_backend_idr;
+extern struct mutex mali_idr_mutex;
+
+int mali_memory_manager_init(struct mali_allocation_manager *mgr);
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr);
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc);
+
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_allocation_manager *mgr, u32 mali_address);
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+
+#endif
+
index cf4727a8222c7da0c5ea003d5560efacda0ec419..14f02e131cd437380a9dd83c44bbb6cbf4aacfee 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
 #define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
 #else
 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
 #endif
 #else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
 #endif
 static void mali_mem_os_trim_pool(struct work_struct *work);
 
@@ -56,54 +66,100 @@ static struct mali_mem_os_allocator {
        .allocated_pages = ATOMIC_INIT(0),
        .allocation_limit = 0,
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
        .shrinker.shrink = mali_mem_os_shrink,
+#else
+       .shrinker.count_objects = mali_mem_os_shrink_count,
+       .shrinker.scan_objects = mali_mem_os_shrink,
+#endif
        .shrinker.seeks = DEFAULT_SEEKS,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
        .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
        .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
 #else
        .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
 #endif
 };
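+
+/*
+ * Note: kernels >= 3.12 split the single shrink() callback into
+ * count_objects()/scan_objects(), which is why the initializer above picks
+ * between .shrink and the count/scan pair by LINUX_VERSION_CODE.
+ */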
 
-static void mali_mem_os_free(mali_mem_allocation *descriptor)
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
 {
        LIST_HEAD(pages);
-
-       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
-
-       atomic_sub(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
+       struct mali_page_node *m_page, *m_tmp;
+       u32 free_pages_nr = 0;
+
+       if (MALI_TRUE == cow_flag) {
+               list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
+                       /* only handle OS nodes here */
+                       if (m_page->type == MALI_PAGE_NODE_OS) {
+                               if (1 == _mali_page_node_get_ref_count(m_page)) {
+                                       list_move(&m_page->list, &pages);
+                                       atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+                                       free_pages_nr++;
+                               } else {
+                                       _mali_page_node_unref(m_page);
+                                       m_page->page = NULL;
+                                       list_del(&m_page->list);
+                                       kfree(m_page);
+                               }
+                       }
+               }
+       } else {
+               list_cut_position(&pages, os_pages, os_pages->prev);
+               atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
+               free_pages_nr = pages_count;
+       }
 
        /* Put pages on pool. */
-       list_cut_position(&pages, &descriptor->os_mem.pages, descriptor->os_mem.pages.prev);
-
        spin_lock(&mali_mem_os_allocator.pool_lock);
-
        list_splice(&pages, &mali_mem_os_allocator.pool_pages);
-       mali_mem_os_allocator.pool_count += descriptor->os_mem.count;
-
+       mali_mem_os_allocator.pool_count += free_pages_nr;
        spin_unlock(&mali_mem_os_allocator.pool_lock);
 
        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
                MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
                queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
        }
+       return free_pages_nr;
 }
 
-static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
+/**
+ * Put a page without returning it to the page pool.
+ */
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
 {
-       struct page *new_page, *tmp;
-       LIST_HEAD(pages);
+       MALI_DEBUG_ASSERT_POINTER(page);
+       if (1 == page_count(page)) {
+               atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+               dma_unmap_page(&mali_platform_device->dev, page_private(page),
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+               ClearPagePrivate(page);
+       }
+       put_page(page);
+       return _MALI_OSK_ERR_OK;
+}
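+
+/*
+ * Note on the page_count() == 1 test above: when this put drops the last
+ * reference, the page is about to return to the kernel, so the DMA mapping
+ * stored in page_private() must be torn down first; otherwise another
+ * holder still owns the mapping and only the refcount is dropped.
+ */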
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
+{
+       struct page *new_page;
+       LIST_HEAD(pages_list);
        size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
        size_t remaining = page_count;
+       struct mali_page_node *m_page, *m_tmp;
        u32 i;
 
-       MALI_DEBUG_ASSERT_POINTER(descriptor);
-       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
+       MALI_DEBUG_ASSERT_POINTER(os_mem);
 
-       INIT_LIST_HEAD(&descriptor->os_mem.pages);
-       descriptor->os_mem.count = page_count;
+       if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+               MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+                                    size,
+                                    atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+                                    mali_mem_os_allocator.allocation_limit));
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&os_mem->pages);
+       os_mem->count = page_count;
 
        /* Grab pages from pool. */
        {
@@ -112,7 +168,7 @@ static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
                pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
                for (i = pool_pages; i > 0; i--) {
                        BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
-                       list_move(mali_mem_os_allocator.pool_pages.next, &pages);
+                       list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
                }
                mali_mem_os_allocator.pool_count -= pool_pages;
                remaining -= pool_pages;
@@ -121,35 +177,79 @@ static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
 
        /* Process pages from pool. */
        i = 0;
-       list_for_each_entry_safe(new_page, tmp, &pages, lru) {
-               BUG_ON(NULL == new_page);
+       list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
+               BUG_ON(NULL == m_page);
 
-               list_move_tail(&new_page->lru, &descriptor->os_mem.pages);
+               list_move_tail(&m_page->list, &os_mem->pages);
        }
 
        /* Allocate new pages, if needed. */
        for (i = 0; i < remaining; i++) {
                dma_addr_t dma_addr;
+               gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD;
+               int err;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+               flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+               flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+               flags |= GFP_DMA;
+#else
+               /* arm64 Utgard only works on memory below 4G, but the kernel
+                * does not provide a method to allocate memory below 4G
+                */
+               MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
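+               /* Net effect of the zone selection above: 32-bit non-LPAE ARM
+                * can use highmem freely, while LPAE/64-bit builds steer the
+                * page into a DMA zone so its bus address fits the GPU's
+                * 32-bit address range. */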
 
-               new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+               new_page = alloc_page(flags);
 
                if (unlikely(NULL == new_page)) {
                        /* Calculate the number of pages actually allocated, and free them. */
-                       descriptor->os_mem.count = (page_count - remaining) + i;
-                       atomic_add(descriptor->os_mem.count, &mali_mem_os_allocator.allocated_pages);
-                       mali_mem_os_free(descriptor);
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
                        return -ENOMEM;
                }
 
                /* Ensure page is flushed from CPU caches. */
                dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
-                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+                                       0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+               err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+               if (unlikely(err)) {
+                       MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+                                               new_page, err));
+                       __free_page(new_page);
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+                       return -EFAULT;
+               }
 
                /* Store page phys addr */
                SetPagePrivate(new_page);
                set_page_private(new_page, dma_addr);
 
-               list_add_tail(&new_page->lru, &descriptor->os_mem.pages);
+               m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+               if (unlikely(NULL == m_page)) {
+                       MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n"));
+                       dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
+                                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+                       ClearPagePrivate(new_page);
+                       __free_page(new_page);
+                       os_mem->count = (page_count - remaining) + i;
+                       atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+                       mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+                       return -EFAULT;
+               }
+               m_page->page = new_page;
+
+               list_add_tail(&m_page->list, &os_mem->pages);
        }
 
        atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
@@ -162,46 +262,65 @@ static int mali_mem_os_alloc_pages(mali_mem_allocation *descriptor, u32 size)
        return 0;
 }
 
-static int mali_mem_os_mali_map(mali_mem_allocation *descriptor, struct mali_session_data *session)
-{
-       struct mali_page_directory *pagedir = session->page_directory;
-       struct page *page;
-       _mali_osk_errcode_t err;
-       u32 virt = descriptor->mali_mapping.addr;
-       u32 prop = descriptor->mali_mapping.properties;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_OS == descriptor->type);
 
-       err = mali_mem_mali_map_prepare(descriptor);
-       if (_MALI_OSK_ERR_OK != err) {
-               return -ENOMEM;
-       }
-
-       list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
-               u32 phys = page_private(page);
-               mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+void mali_mem_os_mali_map(mali_mem_backend *mem_bkend, u32 vaddr, u32 props)
+{
+       struct mali_session_data *session;
+       struct mali_page_directory *pagedir;
+       struct mali_page_node *m_page;
+       u32 virt = vaddr;
+       u32 prop = props;
+
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
+
+       session = mem_bkend->mali_allocation->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       pagedir = session->page_directory;
+
+       list_for_each_entry(m_page, &mem_bkend->os_mem.pages, list) {
+               dma_addr_t phys = page_private(m_page->page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+               /* Verify that the "physical" address is 32-bit and
+                * usable for Mali, when on a system with bus addresses
+                * wider than 32-bit. */
+               MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+               mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
                virt += MALI_MMU_PAGE_SIZE;
        }
-
-       return 0;
 }
 
-static void mali_mem_os_mali_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
 {
-       mali_mem_mali_map_free(descriptor);
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
 }
 
-static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_struct *vma)
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
 {
+       mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+       struct mali_page_node *m_page;
        struct page *page;
        int ret;
        unsigned long addr = vma->vm_start;
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
 
-       list_for_each_entry(page, &descriptor->os_mem.pages, lru) {
+       list_for_each_entry(m_page, &os_mem->pages, list) {
                /* We should use vm_insert_page, but it does a dcache
                 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
                ret = vm_insert_page(vma, addr, page);
                */
+               page = m_page->page;
                ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
 
                if (unlikely(0 != ret)) {
@@ -213,90 +332,53 @@ static int mali_mem_os_cpu_map(mali_mem_allocation *descriptor, struct vm_area_s
        return 0;
 }
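
The comment retained above records why the driver maps by pfn: vm_insert_page() performs a dcache flush per page and is therefore much slower. A sketch of the same loop, reusing the demo_page_node stand-in from the previous sketch; note that on kernels from 4.20 onwards vm_insert_pfn() is gone and vmf_insert_pfn() is the replacement:

    static int demo_cpu_map(struct vm_area_struct *vma, struct list_head *pages)
    {
            struct demo_page_node *node;
            unsigned long addr = vma->vm_start;

            list_for_each_entry(node, pages, list) {
                    int ret = vm_insert_pfn(vma, addr, page_to_pfn(node->page));

                    if (unlikely(0 != ret))
                            return ret;
                    addr += PAGE_SIZE;
            }
            return 0;
    }
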
 
-mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
 {
-       mali_mem_allocation *descriptor;
-       int err;
 
-       if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
-               MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
-                                    size,
-                                    atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
-                                    mali_mem_os_allocator.allocation_limit));
-               return NULL;
-       }
+       mali_mem_allocation *alloc;
+       u32 free_pages_nr = 0;
+       MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+       MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
 
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_OS);
-       if (NULL == descriptor) return NULL;
+       alloc = mem_bkend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
 
-       descriptor->mali_mapping.addr = mali_addr;
-       descriptor->size = size;
-       descriptor->cpu_mapping.addr = (void __user*)vma->vm_start;
-       descriptor->cpu_mapping.ref = 1;
-
-       if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
+       /* Unmap the memory from the mali virtual address space. */
+       mali_mem_os_mali_unmap(alloc);
+       mutex_lock(&mem_bkend->mutex);
+       /* Free pages */
+       if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->cow_flag) {
+               free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
        } else {
-               /* Cached Mali memory mapping */
-               descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
-               vma->vm_flags |= VM_SHARED;
+               free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
        }
+       mutex_unlock(&mem_bkend->mutex);
 
-       err = mali_mem_os_alloc_pages(descriptor, size); /* Allocate pages */
-       if (0 != err) goto alloc_failed;
+       MALI_DEBUG_PRINT(4, ("OS Mem free: allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+                            free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
 
-       /* Take session memory lock */
-       _mali_osk_mutex_wait(session->memory_lock);
-
-       err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
-       if (0 != err) goto mali_map_failed;
-
-       _mali_osk_mutex_signal(session->memory_lock);
-
-       err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
-       if (0 != err) goto cpu_map_failed;
-
-       return descriptor;
-
-cpu_map_failed:
-       mali_mem_os_mali_unmap(session, descriptor);
-mali_map_failed:
-       _mali_osk_mutex_signal(session->memory_lock);
-       mali_mem_os_free(descriptor);
-alloc_failed:
-       mali_mem_descriptor_destroy(descriptor);
-       MALI_DEBUG_PRINT(2, ("OS allocator: Failed to allocate memory (%d)\n", err));
-       return NULL;
-}
-
-void mali_mem_os_release(mali_mem_allocation *descriptor)
-{
-       struct mali_session_data *session = descriptor->session;
-
-       /* Unmap the memory from the mali virtual address space. */
-       mali_mem_os_mali_unmap(session, descriptor);
-
-       /* Free pages */
-       mali_mem_os_free(descriptor);
+       mem_bkend->os_mem.count = 0;
+       return free_pages_nr;
 }
 
 
 #define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
 static struct {
        struct {
-               u32 phys;
+               mali_dma_addr phys;
                mali_io_address mapping;
        } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
-       u32 count;
+       size_t count;
        spinlock_t lock;
 } mali_mem_page_table_page_pool = {
        .count = 0,
        .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
 };
 
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping)
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
 {
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+       dma_addr_t tmp_phys;
 
        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (0 < mali_mem_page_table_page_pool.count) {
@@ -309,21 +391,32 @@ _mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mappi
        spin_unlock(&mali_mem_page_table_page_pool.lock);
 
        if (_MALI_OSK_ERR_OK != ret) {
-               *mapping = dma_alloc_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, phys, GFP_KERNEL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+                                          _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+                                          GFP_KERNEL, &dma_attrs_wc);
+#else
+               *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+                                                 _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
                if (NULL != *mapping) {
                        ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+                       /* Verify that the "physical" address is 32-bit and
+                        * usable for Mali, when on a system with bus addresses
+                        * wider than 32-bit. */
+                       MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+                       *phys = (mali_dma_addr)tmp_phys;
                }
        }
-       
-       if (ret != _MALI_OSK_ERR_OK)
-   {
-      MALI_DEBUG_PRINT(2, ("os_get_table_page fail: ret=%d, *mapping=%x\n", ret,  *mapping));
-      MALI_DEBUG_PRINT(2, ("os_get_table_page fail: mali_mem_page_table_page_pool.count=%x\n", mali_mem_page_table_page_pool.count));
-   }
+
        return ret;
 }
 
-void mali_mem_os_release_table_page(u32 phys, void *virt)
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
 {
        spin_lock(&mali_mem_page_table_page_pool.lock);
        if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
@@ -337,20 +430,31 @@ void mali_mem_os_release_table_page(u32 phys, void *virt)
        } else {
                spin_unlock(&mali_mem_page_table_page_pool.lock);
 
-               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev,
+                              _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+                              &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
        }
 }
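
The pool above is a classic bounded cache: get prefers the pool and falls back to the DMA allocator, release returns pages to the pool until it is full. A self-contained sketch of the pattern with illustrative demo_* names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define DEMO_POOL_SIZE 128

    static struct {
            void *slot[DEMO_POOL_SIZE];
            size_t count;
            spinlock_t lock;
    } demo_pool = {
            .count = 0,
            .lock = __SPIN_LOCK_UNLOCKED(demo_pool.lock),
    };

    /* Prefer the pool; the caller falls back to the real allocator on NULL. */
    static void *demo_get(void)
    {
            void *p = NULL;

            spin_lock(&demo_pool.lock);
            if (demo_pool.count > 0)
                    p = demo_pool.slot[--demo_pool.count];
            spin_unlock(&demo_pool.lock);
            return p;
    }

    /* Returns false when the pool is full and the caller must free p itself. */
    static bool demo_put(void *p)
    {
            bool pooled = false;

            spin_lock(&demo_pool.lock);
            if (demo_pool.count < DEMO_POOL_SIZE) {
                    demo_pool.slot[demo_pool.count++] = p;
                    pooled = true;
            }
            spin_unlock(&demo_pool.lock);
            return pooled;
    }
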
 
-static void mali_mem_os_free_page(struct page *page)
+void mali_mem_os_free_page_node(struct mali_page_node *m_page)
 {
-       BUG_ON(page_count(page) != 1);
-
-       dma_unmap_page(&mali_platform_device->dev, page_private(page),
-                      _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
-
-       ClearPagePrivate(page);
+       struct page *page = m_page->page;
+       MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);
 
+       if (1 == page_count(page)) {
+               dma_unmap_page(&mali_platform_device->dev, page_private(page),
+                              _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+               ClearPagePrivate(page);
+       }
        __free_page(page);
+       m_page->page = NULL;
+       list_del(&m_page->list);
+       kfree(m_page);
 }
 
 /* The maximum number of page table pool pages to free in one go. */
@@ -362,7 +466,7 @@ static void mali_mem_os_free_page(struct page *page)
  */
 static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
 {
-       u32 phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+       mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
        u32 i;
 
@@ -382,7 +486,14 @@ static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
 
        /* After releasing the spinlock: free the pages we removed from the pool. */
        for (i = 0; i < nr_to_free; i++) {
-               dma_free_writecombine(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, virt_arr[i], phys_arr[i]);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+               dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+                              virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+               dma_free_writecombine(&mali_platform_device->dev,
+                                     _MALI_OSK_MALI_PAGE_SIZE,
+                                     virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
        }
 }
 
@@ -407,35 +518,36 @@ static void mali_mem_os_trim_page_table_page_pool(void)
        mali_mem_os_page_table_pool_free(nr_to_free);
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       return mali_mem_os_allocator.pool_count;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
 static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
 #else
 static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
-#endif
+#endif /* Linux < 2.6.35 */
 #else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
 static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
-#endif
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif /* Linux < 3.12.0 */
+#endif /* Linux < 3.0.0 */
 {
-       struct page *page, *tmp;
+       struct mali_page_node *m_page, *m_tmp;
        unsigned long flags;
        struct list_head *le, pages;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
        int nr = nr_to_scan;
 #else
        int nr = sc->nr_to_scan;
 #endif
 
        if (0 == nr) {
-               //[BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,1017702,mtk patch for fix ANR issue when TAT
-               //return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
-               return mali_mem_os_allocator.pool_count;
-               //[BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com
-       }
-
-       if (0 == mali_mem_os_allocator.pool_count) {
-               /* No pages availble */
-               return 0;
+               return mali_mem_os_shrink_count(shrinker, sc);
        }
 
        if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
@@ -443,6 +555,12 @@ static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *
                return -1;
        }
 
+       if (0 == mali_mem_os_allocator.pool_count) {
+               /* No pages available */
+               spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+               return 0;
+       }
+
        /* Release from general page pool */
        nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
        mali_mem_os_allocator.pool_count -= nr;
@@ -453,30 +571,26 @@ static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *
        list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
        spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
 
-       list_for_each_entry_safe(page, tmp, &pages, lru) {
-               mali_mem_os_free_page(page);
+       list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+               mali_mem_os_free_page_node(m_page);
        }
 
-       //[BUGFIX]-Del-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,1017702,mtk patch for fix ANR issue when TAT
-       /* Release some pages from page table page pool */
-       //mali_mem_os_trim_page_table_page_pool();
-       //[BUGFIX]-Del-END by SCDTABLET.zhangku.guo@tcl.com
-
        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
                /* Pools are empty, stop timer */
                MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
                cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
        }
 
-       //[BUGFIX]-Mod-BEGIN by SCDTABLET.zhangku.guo@tcl.com,06/09/2015,1017702,mtk patch for fix ANR issue when TAT
-       //return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
-       return mali_mem_os_allocator.pool_count;
-       //[BUGFIX]-Mod-END by SCDTABLET.zhangku.guo@tcl.com
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+       return mali_mem_os_shrink_count(shrinker, sc);
+#else
+       return nr;
+#endif
 }
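
The #if ladder above tracks the kernel's shrinker API: before 2.6.35 the callback was a bare function, up to 3.0 it took (shrinker, nr_to_scan, gfp_mask), up to 3.12 it took a shrink_control and returned the remaining object count, and from 3.12 the interface splits into count_objects/scan_objects with scan returning the number actually freed. A sketch of the post-3.12 form (the demo_* names are placeholders):

    #include <linux/kernel.h>
    #include <linux/shrinker.h>

    static unsigned long demo_pool_count; /* freeable pages, maintained elsewhere */

    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
            return demo_pool_count;
    }

    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long freed = min_t(unsigned long, sc->nr_to_scan, demo_pool_count);

            demo_pool_count -= freed; /* a real implementation frees pages here */
            return freed;             /* the freed count, not the remaining count */
    }

    static struct shrinker demo_shrinker = {
            .count_objects = demo_count,
            .scan_objects  = demo_scan,
            .seeks         = DEFAULT_SEEKS,
    };
    /* registered once with register_shrinker(&demo_shrinker) */
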
 
 static void mali_mem_os_trim_pool(struct work_struct *data)
 {
-       struct page *page, *tmp;
+       struct mali_page_node *m_page, *m_tmp;
        struct list_head *le;
        LIST_HEAD(pages);
        size_t nr_to_free;
@@ -489,8 +603,10 @@ static void mali_mem_os_trim_pool(struct work_struct *data)
        spin_lock(&mali_mem_os_allocator.pool_lock);
        if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
                size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+               const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
+
                /* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. */
-               nr_to_free = max(count / 2, (size_t)64);
+               nr_to_free = max(count / 2, min_to_free);
 
                mali_mem_os_allocator.pool_count -= nr_to_free;
                list_for_each(le, &mali_mem_os_allocator.pool_pages) {
@@ -501,8 +617,8 @@ static void mali_mem_os_trim_pool(struct work_struct *data)
        }
        spin_unlock(&mali_mem_os_allocator.pool_lock);
 
-       list_for_each_entry_safe(page, tmp, &pages, lru) {
-               mali_mem_os_free_page(page);
+       list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+               mali_mem_os_free_page_node(m_page);
        }
 
        /* Release some pages from page table page pool */
@@ -521,6 +637,10 @@ _mali_osk_errcode_t mali_mem_os_init(void)
                return _MALI_OSK_ERR_NOMEM;
        }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
        register_shrinker(&mali_mem_os_allocator.shrinker);
 
        return _MALI_OSK_ERR_OK;
@@ -528,15 +648,18 @@ _mali_osk_errcode_t mali_mem_os_init(void)
 
 void mali_mem_os_term(void)
 {
-       struct page *page, *tmp;
-
+       struct mali_page_node *m_page, *m_tmp;
        unregister_shrinker(&mali_mem_os_allocator.shrinker);
        cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
-       destroy_workqueue(mali_mem_os_allocator.wq);
+
+       if (NULL != mali_mem_os_allocator.wq) {
+               destroy_workqueue(mali_mem_os_allocator.wq);
+               mali_mem_os_allocator.wq = NULL;
+       }
 
        spin_lock(&mali_mem_os_allocator.pool_lock);
-       list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
-               mali_mem_os_free_page(page);
+       list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
+               mali_mem_os_free_page_node(m_page);
 
                --mali_mem_os_allocator.pool_count;
        }
index 76f8ecfbfae487133a8e33007cb985452b0305e4..b0c444a6e0c2097d898693d344574e812303c24f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define __MALI_MEMORY_OS_ALLOC_H__
 
 #include "mali_osk.h"
-#include "mali_session.h"
-
 #include "mali_memory_types.h"
 
-/* OS memory allocator */
-/** @brief Allocate memory from OS
- *
- * This function will create a descriptor, allocate pages and map these on the CPU and Mali.
- *
- * @param mali_addr Mali virtual address to use for Mali mapping
- * @param size Size to allocate
- * @param vma Pointer to vma for CPU mapping
- * @param session Pointer to session doing the allocation
- */
-mali_mem_allocation *mali_mem_os_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session);
 
 /** @brief Release Mali OS memory
  *
  * The session memory_lock must be held when calling this function.
  *
- * @param descriptor Pointer to the descriptor to release
+ * @param mem_bkend Pointer to the mali_mem_backend to release
  */
-void mali_mem_os_release(mali_mem_allocation *descriptor);
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend);
 
-_mali_osk_errcode_t mali_mem_os_get_table_page(u32 *phys, mali_io_address *mapping);
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
 
-void mali_mem_os_release_table_page(u32 phys, void *virt);
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
 
 _mali_osk_errcode_t mali_mem_os_init(void);
+
 void mali_mem_os_term(void);
+
 u32 mali_mem_os_stat(void);
 
+void mali_mem_os_free_page_node(struct mali_page_node *m_page);
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size);
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag);
+
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page);
+
+void mali_mem_os_mali_map(mali_mem_backend *mem_bkend, u32 vaddr, u32 props);
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
 #endif /* __MALI_MEMORY_OS_ALLOC_H__ */
index 97db7e981b2579589062be78ea84874b972c875b..0444d981e835cdfd3d8b01fcfda58f32aedd4dd5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -23,8 +23,32 @@ typedef enum mali_mem_type {
        MALI_MEM_DMA_BUF,
        MALI_MEM_UMP,
        MALI_MEM_BLOCK,
+       MALI_MEM_COW,
+       MALI_MEM_TYPE_MAX,
 } mali_mem_type;
 
+typedef struct mali_block_item {
+       /* For the block type, block_phy is always page-size aligned,
+       * so the low 12 bits are used as a ref_count.
+       */
+       unsigned long phy_addr;
+} mali_block_item;
+
+
+typedef enum mali_page_node_type {
+       MALI_PAGE_NODE_OS,
+       MALI_PAGE_NODE_BLOCK,
+} mali_page_node_type;
+
+typedef struct mali_page_node {
+       struct list_head list;
+       union {
+               struct page *page;
+               mali_block_item *blk_it; /* pointer to a block item */
+       };
+       u32 type;
+} mali_page_node;
+
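
A sketch of how one OS-backed node for the union above might be built; demo_alloc_node is illustrative only, and the driver's real allocation path additionally DMA-maps the page and stores the bus address via set_page_private():

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    static struct mali_page_node *demo_alloc_node(gfp_t gfp)
    {
            struct mali_page_node *node = kzalloc(sizeof(*node), gfp);

            if (NULL == node)
                    return NULL;

            node->type = MALI_PAGE_NODE_OS;
            node->page = alloc_page(gfp);
            if (NULL == node->page) {
                    kfree(node);
                    return NULL;
            }
            INIT_LIST_HEAD(&node->list);
            return node;
    }
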
 typedef struct mali_mem_os_mem {
        struct list_head pages;
        u32 count;
@@ -55,7 +79,8 @@ typedef struct block_allocator_allocation {
 } block_allocator_allocation;
 
 typedef struct mali_mem_block_mem {
-       block_allocator_allocation mem;
+       struct list_head pfns;
+       u32 count;
 } mali_mem_block_mem;
 
 typedef struct mali_mem_virt_mali_mapping {
@@ -65,22 +90,59 @@ typedef struct mali_mem_virt_mali_mapping {
 
 typedef struct mali_mem_virt_cpu_mapping {
        void __user *addr;
-       u32 ref;
+       struct vm_area_struct *vma;
 } mali_mem_virt_cpu_mapping;
 
 #define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
 #define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
 
+typedef struct mali_mm_node {
+       /* Mali GPU vaddr start; u32 because the MMU only supports 32-bit addresses */
+       uint32_t start; /* GPU vaddr */
+       uint32_t size;  /* GPU allocation virtual size */
+       unsigned allocated : 1;
+} mali_mm_node;
+
+typedef struct mali_vma_node {
+       struct mali_mm_node vm_node;
+       struct rb_node vm_rb;
+} mali_vma_node;
+
+
 typedef struct mali_mem_allocation {
        MALI_DEBUG_CODE(u32 magic);
        mali_mem_type type;                /**< Type of memory */
-       int id;                            /**< ID in the descriptor map for this allocation */
-
-       u32 size;                          /**< Size of the allocation */
        u32 flags;                         /**< Flags for this allocation */
 
        struct mali_session_data *session; /**< Pointer to session that owns the allocation */
 
+       mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+       mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+
+       /* add for new memory system */
+       struct mali_vma_node mali_vma_node;
+       u32 vsize; /* virtual size */
+       u32 psize; /* physical backend memory size */
+       struct list_head list;
+       s32 backend_handle; /* idr for mem_backend */
+       _mali_osk_atomic_t mem_alloc_refcount;
+} mali_mem_allocation;
+
+/* COW backend memory type */
+typedef struct mali_mem_cow {
+       struct list_head pages;  /**< all pages for this COW backend allocation,
+                                     including newly allocated pages for the modified range */
+       u32 count;               /**< number of pages */
+       s32 change_pages_nr;
+} mali_mem_cow;
+
+#define MALI_MEM_BACKEND_FLAG_COWED                   0x1 /* COW has happened on this backend */
+#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE        0x2 /* COW backend mapped so the CPU is not allowed to write */
+
+typedef struct mali_mem_backend {
+       mali_mem_type type;                /**< Type of backend memory */
+       u32 flags;                         /**< Flags for this allocation */
+       u32 size;
        /* Union selected by type. */
        union {
                mali_mem_os_mem os_mem;       /**< MALI_MEM_OS */
@@ -88,11 +150,14 @@ typedef struct mali_mem_allocation {
                mali_mem_dma_buf dma_buf;     /**< MALI_MEM_DMA_BUF */
                mali_mem_ump ump_mem;         /**< MALI_MEM_UMP */
                mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+               mali_mem_cow cow_mem;
        };
+       mali_mem_allocation *mali_allocation;
+       struct mutex mutex;
+       mali_mem_type cow_type;
 
-       mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
-       mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
-} mali_mem_allocation;
+       u32 cow_flag;
+} mali_mem_backend;
 
 #define MALI_MEM_FLAG_MALI_GUARD_PAGE (1 << 0)
 #define MALI_MEM_FLAG_DONT_CPU_MAP    (1 << 1)
index 2a95ae7403a5dad9f70bb72dd1de01bbb24554dc..664c5c1f8f66c553b94ca0b408fd4ab7364c0445 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_kernel_common.h"
 #include "mali_session.h"
 #include "mali_kernel_linux.h"
-
 #include "mali_memory.h"
-
 #include "ump_kernel_interface.h"
 
-static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *descriptor)
+static int mali_mem_ump_map(mali_mem_backend *mem_backend)
 {
        ump_dd_handle ump_mem;
+       mali_mem_allocation *alloc;
+       struct mali_session_data *session;
        u32 nr_blocks;
        u32 i;
        ump_dd_physical_block *ump_blocks;
        struct mali_page_directory *pagedir;
        u32 offset = 0;
-       u32 prop;
        _mali_osk_errcode_t err;
 
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+
+       session = alloc->session;
        MALI_DEBUG_ASSERT_POINTER(session);
-       MALI_DEBUG_ASSERT_POINTER(descriptor);
-       MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
 
-       ump_mem = descriptor->ump_mem.handle;
+       ump_mem = mem_backend->ump_mem.handle;
        MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
 
        nr_blocks = ump_dd_phys_block_count_get(ump_mem);
@@ -42,7 +46,7 @@ static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *
                return -EINVAL;
        }
 
-       ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks);
+       ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks);
        if (NULL == ump_blocks) {
                return -ENOMEM;
        }
@@ -53,163 +57,98 @@ static int mali_ump_map(struct mali_session_data *session, mali_mem_allocation *
        }
 
        pagedir = session->page_directory;
-       prop = descriptor->mali_mapping.properties;
 
-       err = mali_mem_mali_map_prepare(descriptor);
+       mali_session_memory_lock(session);
+
+       err = mali_mem_mali_map_prepare(alloc);
        if (_MALI_OSK_ERR_OK != err) {
                MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
 
                _mali_osk_free(ump_blocks);
+               mali_session_memory_unlock(session);
                return -ENOMEM;
        }
 
-       for(i = 0; i < nr_blocks; ++i) {
-               u32 virt = descriptor->mali_mapping.addr + offset;
+       for (i = 0; i < nr_blocks; ++i) {
+               u32 virt = alloc->mali_vma_node.vm_node.start + offset;
 
                MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
 
                mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
-                                       ump_blocks[i].size, prop);
+                                       ump_blocks[i].size, MALI_MMU_FLAGS_DEFAULT);
 
                offset += ump_blocks[i].size;
        }
 
-       if (descriptor->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
-               u32 virt = descriptor->mali_mapping.addr + offset;
+       if (alloc->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               u32 virt = alloc->mali_vma_node.vm_node.start + offset;
 
                /* Map in an extra virtual guard page at the end of the VMA */
                MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
 
-               mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, prop);
+               mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
 
                offset += _MALI_OSK_MALI_PAGE_SIZE;
        }
-
+       mali_session_memory_unlock(session);
        _mali_osk_free(ump_blocks);
-
        return 0;
 }
 
-void mali_ump_unmap(struct mali_session_data *session, mali_mem_allocation *descriptor)
+static void mali_mem_ump_unmap(mali_mem_allocation *alloc)
 {
-       ump_dd_handle ump_mem;
-       struct mali_page_directory *pagedir;
-
-       ump_mem = descriptor->ump_mem.handle;
-       pagedir = session->page_directory;
-
-       MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
-
-       mali_mem_mali_map_free(descriptor);
-
-       ump_dd_reference_release(ump_mem);
-       return;
+       struct mali_session_data *session;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       session = alloc->session;
+       MALI_DEBUG_ASSERT_POINTER(session);
+       mali_session_memory_lock(session);
+       mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+                              alloc->flags);
+       mali_session_memory_unlock(session);
 }
 
-_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32  secure_id, u32 flags)
 {
        ump_dd_handle ump_mem;
-       struct mali_session_data *session;
-       mali_mem_allocation *descriptor;
-       int md, ret;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session = (struct mali_session_data *)args->ctx;
-       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
-
-       /* check arguments */
-       /* NULL might be a valid Mali address */
-       if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
-
-       /* size must be a multiple of the system page size */
-       if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+       int ret;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
 
        MALI_DEBUG_PRINT(3,
-                        ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
-                         args->secure_id, args->mali_address, args->size));
-
-       ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);
+                        ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+                         secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size));
 
+       ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
        if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
-
-       descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
-       if (NULL == descriptor) {
-               ump_dd_reference_release(ump_mem);
-               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+       alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+               alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
        }
 
-       descriptor->ump_mem.handle = ump_mem;
-       descriptor->mali_mapping.addr = args->mali_address;
-       descriptor->size = args->size;
-       descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
-       descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+       mem_backend->ump_mem.handle = ump_mem;
 
-       if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
-               descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
-       }
-
-       _mali_osk_mutex_wait(session->memory_lock);
-
-       ret = mali_ump_map(session, descriptor);
+       ret = mali_mem_ump_map(mem_backend);
        if (0 != ret) {
-               _mali_osk_mutex_signal(session->memory_lock);
-               ump_dd_reference_release(ump_mem);
-               mali_mem_descriptor_destroy(descriptor);
-               MALI_ERROR(_MALI_OSK_ERR_NOMEM);
-       }
-
-       _mali_osk_mutex_signal(session->memory_lock);
-
-
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
                ump_dd_reference_release(ump_mem);
-               mali_mem_descriptor_destroy(descriptor);
-               MALI_ERROR(_MALI_OSK_ERR_FAULT);
+               return _MALI_OSK_ERR_FAULT;
        }
-
-       args->cookie = md;
-
-       MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
-
-       MALI_SUCCESS;
+       MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n"));
+       return _MALI_OSK_ERR_OK;
 }
 
-void mali_mem_ump_release(mali_mem_allocation *descriptor)
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend)
 {
-       struct mali_session_data *session = descriptor->session;
-
-       MALI_DEBUG_ASSERT(MALI_MEM_UMP == descriptor->type);
+       ump_dd_handle ump_mem;
+       mali_mem_allocation *alloc;
+       MALI_DEBUG_ASSERT_POINTER(mem_backend);
+       MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+       ump_mem = mem_backend->ump_mem.handle;
+       MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
 
-       mali_ump_unmap(session, descriptor);
+       alloc = mem_backend->mali_allocation;
+       MALI_DEBUG_ASSERT_POINTER(alloc);
+       mali_mem_ump_unmap(alloc);
+       ump_dd_reference_release(ump_mem);
 }
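
For orientation, the handle lifecycle implied by the bind/unbind pair above: the secure id from userspace becomes a ump_dd_handle carrying one reference, which unbind must release exactly once. A sketch against the UMP kernel interface already used by this file (demo_bind_ump is illustrative):

    static int demo_bind_ump(u32 secure_id, ump_dd_handle *out)
    {
            ump_dd_handle h = ump_dd_handle_create_from_secure_id(secure_id);

            if (UMP_DD_HANDLE_INVALID == h)
                    return -EFAULT;

            *out = h; /* paired with ump_dd_reference_release() on unbind */
            return 0;
    }
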
 
-_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args)
-{
-       mali_mem_allocation * descriptor;
-       struct mali_session_data *session;
-
-       MALI_DEBUG_ASSERT_POINTER(args);
-       MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
-
-       session = (struct mali_session_data *)args->ctx;
-       MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
-
-       if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void**)&descriptor)) {
-               MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
-               MALI_ERROR(_MALI_OSK_ERR_FAULT);
-       }
-
-       descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);
-
-       if (NULL != descriptor) {
-               _mali_osk_mutex_wait(session->memory_lock);
-               mali_mem_ump_release(descriptor);
-               _mali_osk_mutex_signal(session->memory_lock);
-
-               mali_mem_descriptor_destroy(descriptor);
-       }
-
-       MALI_SUCCESS;
-}
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_ump.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_ump.h
new file mode 100644 (file)
index 0000000..9ca0bb8
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_MEMORY_UMP_BUF_H__
+#define __MALI_MEMORY_UMP_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32  secure_id, u32 flags);
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_UMP_BUF_H__ */
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.c
new file mode 100644 (file)
index 0000000..3f4dd59
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_external.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+
+
+/**
+* _mali_free_allocation_mem() - free a memory allocation
+*/
+static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc)
+{
+       mali_mem_backend *mem_bkend = NULL;
+       u32 free_pages_nr = 0;
+
+       struct mali_session_data *session = mali_alloc->session;
+       MALI_DEBUG_PRINT(4, ("_mali_free_allocation_mem, psize = 0x%x\n", mali_alloc->psize));
+       if (0 == mali_alloc->psize)
+               goto out;
+
+       /* Look up the backend memory for this allocation */
+       mutex_lock(&mali_idr_mutex);
+       mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+       switch (mem_bkend->type) {
+       case MALI_MEM_OS:
+               free_pages_nr = mali_mem_os_release(mem_bkend);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+       case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+               mali_mem_unbind_ump_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+               MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+#endif
+               break;
+       case MALI_MEM_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+               mali_mem_unbind_dma_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+               MALI_DEBUG_PRINT(2, ("DMA not supported\n"));
+#endif
+               break;
+       case MALI_MEM_EXTERNAL:
+               mali_mem_unbind_ext_buf(mem_bkend);
+               atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+               break;
+
+       case MALI_MEM_BLOCK:
+               free_pages_nr = mali_mem_block_release(mem_bkend);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+
+       case MALI_MEM_COW:
+               free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE);
+               atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+               break;
+       default:
+               MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type));
+               break;
+       }
+
+       /* Remove the backend memory index */
+       mutex_lock(&mali_idr_mutex);
+       idr_remove(&mali_backend_idr, mali_alloc->backend_handle);
+       mutex_unlock(&mali_idr_mutex);
+       kfree(mem_bkend);
+out:
+       /* Remove the memory allocation */
+       mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node);
+       mali_mem_allocation_struct_destory(mali_alloc);
+       return free_pages_nr;
+}
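
The free path above resolves backend_handle through a mutex-guarded idr and removes the id once the backend is gone. A self-contained sketch of that handle pattern (the demo_* names are placeholders; idr_alloc() is available from kernel 3.9):

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_idr_lock);
    static DEFINE_IDR(demo_idr);

    /* Store an object and hand back a small integer handle (>= 1). */
    static int demo_store(void *obj)
    {
            int id;

            mutex_lock(&demo_idr_lock);
            id = idr_alloc(&demo_idr, obj, 1, 0, GFP_KERNEL);
            mutex_unlock(&demo_idr_lock);
            return id; /* negative errno on failure */
    }

    /* Resolve a handle and drop it, as the free path above does. */
    static void *demo_take(int id)
    {
            void *obj;

            mutex_lock(&demo_idr_lock);
            obj = idr_find(&demo_idr, id);
            if (obj)
                    idr_remove(&demo_idr, id);
            mutex_unlock(&demo_idr_lock);
            return obj;
    }
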
+
+/**
+* mali_allocation_unref() - drop one reference on an allocation
+*/
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc)
+{
+       u32 free_pages_nr = 0;
+       mali_mem_allocation *mali_alloc = *alloc;
+       *alloc = NULL;
+       if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) {
+               free_pages_nr = _mali_free_allocation_mem(mali_alloc);
+       }
+       return free_pages_nr;
+}
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc)
+{
+       _mali_osk_atomic_inc(&alloc->mem_alloc_refcount);
+}
+
+void mali_free_session_allocations(struct mali_session_data *session)
+{
+       struct mali_mem_allocation *entry, *next;
+
+       MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n"));
+
+       list_for_each_entry_safe(entry, next, &session->allocation_mgr.head, list) {
+               mali_allocation_unref(&entry);
+       }
+}
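
mali_allocation_unref() takes a pointer-to-pointer and clears the caller's copy before dropping the count, so a freed allocation can never be reached through that pointer again. A minimal sketch of the same contract:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct demo_obj {
            atomic_t ref;
    };

    static void demo_unref(struct demo_obj **pobj)
    {
            struct demo_obj *obj = *pobj;

            *pobj = NULL; /* the caller's pointer dies first */
            if (0 == atomic_dec_return(&obj->ref))
                    kfree(obj);
    }
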
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_util.h
new file mode 100644 (file)
index 0000000..3338c7b
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __MALI_MEMORY_UTIL_H__
+#define __MALI_MEMORY_UTIL_H__
+
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc);
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc);
+
+void mali_free_session_allocations(struct mali_session_data *session);
+
+#endif
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.c b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.c
new file mode 100644 (file)
index 0000000..3cb2583
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+
+/**
+* internal helper to link a node into the rb-tree
+*/
+static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr,
+               struct mali_vma_node *node)
+{
+       struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct mali_vma_node *iter_node;
+
+       while (likely(*iter)) {
+               parent = *iter;
+               iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb);
+
+               if (node->vm_node.start < iter_node->vm_node.start)
+                       iter = &(*iter)->rb_left;
+               else if (node->vm_node.start > iter_node->vm_node.start)
+                       iter = &(*iter)->rb_right;
+               else
+                       MALI_DEBUG_ASSERT(0);
+       }
+
+       rb_link_node(&node->vm_rb, parent, iter);
+       rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb);
+}
+
+/**
+ * mali_vma_offset_add() - Add offset node to RB Tree
+ */
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+                       struct mali_vma_node *node)
+{
+       int ret = 0;
+       write_lock(&mgr->vm_lock);
+
+       if (node->vm_node.allocated) {
+               goto out;
+       }
+
+       _mali_vma_offset_add_rb(mgr, node);
+       /* set to allocated */
+       node->vm_node.allocated = 1;
+
+out:
+       write_unlock(&mgr->vm_lock);
+       return ret;
+}
+
+/**
+ * mali_vma_offset_remove() - Remove offset node from RB tree
+ */
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+                           struct mali_vma_node *node)
+{
+       write_lock(&mgr->vm_lock);
+
+       if (node->vm_node.allocated) {
+               rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb);
+               memset(&node->vm_node, 0, sizeof(node->vm_node));
+       }
+       write_unlock(&mgr->vm_lock);
+}
+
+/**
+* mali_vma_offset_search() - Search for a node in the RB tree
+*/
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+               unsigned long start, unsigned long pages)
+{
+       struct mali_vma_node *node, *best;
+       struct rb_node *iter;
+       unsigned long offset;
+       read_lock(&mgr->vm_lock);
+
+       iter = mgr->allocation_mgr_rb.rb_node;
+       best = NULL;
+
+       while (likely(iter)) {
+               node = rb_entry(iter, struct mali_vma_node, vm_rb);
+               offset = node->vm_node.start;
+               if (start >= offset) {
+                       iter = iter->rb_right;
+                       best = node;
+                       if (start == offset)
+                               break;
+               } else {
+                       iter = iter->rb_left;
+               }
+       }
+
+       if (best) {
+               offset = best->vm_node.start + best->vm_node.size;
+               if (offset <= start + pages)
+                       best = NULL;
+       }
+       read_unlock(&mgr->vm_lock);
+
+       return best;
+}
+
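
Taken together these three functions form a small offset manager keyed on GPU virtual address. A sketch of driver-side usage, assuming the includes above, that the manager's rwlock and rb-root are initialised at session setup, and that the node is zeroed so vm_node.allocated starts out clear (demo_track_range is illustrative):

    #include <linux/errno.h>
    #include <linux/string.h>

    static int demo_track_range(struct mali_allocation_manager *mgr,
                                struct mali_vma_node *node,
                                u32 gpu_va, u32 size)
    {
            memset(node, 0, sizeof(*node)); /* vm_node.allocated starts clear */
            node->vm_node.start = gpu_va;
            node->vm_node.size = size;

            /* Insert into the rb-tree; marks vm_node.allocated. */
            if (0 != mali_vma_offset_add(mgr, node))
                    return -EINVAL;

            /* ... use the range; later, on free: */
            mali_vma_offset_remove(mgr, node);
            return 0;
    }
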
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.h b/drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_virtual.h
new file mode 100644 (file)
index 0000000..d7b7099
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+#ifndef __MALI_GPU_VMEM_H__
+#define __MALI_GPU_VMEM_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+                       struct mali_vma_node *node);
+
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+                           struct mali_vma_node *node);
+
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+               unsigned long start, unsigned long pages);
+
+#endif
index a6231126f130f27bff62f0e1eebdef2e330d58f7..30d67dea37f7465569f0b84b45068a544a4edb6c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <asm/atomic.h>
 #include "mali_kernel_common.h"
 
-void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom)
 {
        atomic_dec((atomic_t *)&atom->u.val);
 }
 
-u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
 {
        return atomic_dec_return((atomic_t *)&atom->u.val);
 }
 
-void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom)
 {
        atomic_inc((atomic_t *)&atom->u.val);
 }
 
-u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom)
 {
        return atomic_inc_return((atomic_t *)&atom->u.val);
 }
 
-_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
 {
-       MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+       MALI_DEBUG_ASSERT_POINTER(atom);
        atomic_set((atomic_t *)&atom->u.val, val);
-       return _MALI_OSK_ERR_OK;
 }
 
-u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
 {
        return atomic_read((atomic_t *)&atom->u.val);
 }
 
-void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
 {
        MALI_IGNORE(atom);
 }
 
-u32 _mali_osk_atomic_xchg( _mali_osk_atomic_t *atom, u32 val )
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val)
 {
-       return atomic_xchg((atomic_t*)&atom->u.val, val);
+       return atomic_xchg((atomic_t *)&atom->u.val, val);
 }
index f582b458554149e3b3e6ab97627979b1ba8ee861..b89f48d49f76840395cb46401d3a8e3afbf7d045 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -13,7 +13,7 @@
  * Implementation of the OS abstraction layer for the kernel device driver
  */
 
-#include <linux/slab.h>        /* For memory allocation */
+#include <linux/slab.h> /* For memory allocation */
 #include <linux/interrupt.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
@@ -28,7 +28,7 @@ typedef struct _mali_osk_irq_t_struct {
 } mali_osk_irq_object_t;
 
 typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
-static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id);   /* , struct pt_regs *regs*/
 
 #if defined(DEBUG)
 #if 0
@@ -55,10 +55,10 @@ static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
 }
 
 static _mali_osk_errcode_t test_interrupt(u32 irqnum,
-        _mali_osk_irq_trigger_t trigger_func,
-        _mali_osk_irq_ack_t ack_func,
-        void *probe_data,
-        const char *description)
+               _mali_osk_irq_trigger_t trigger_func,
+               _mali_osk_irq_ack_t ack_func,
+               void *probe_data,
+               const char *description)
 {
        unsigned long irq_flags = 0;
        struct test_interrupt_data data = {
@@ -95,10 +95,10 @@ static _mali_osk_errcode_t test_interrupt(u32 irqnum,
 
 #endif /* defined(DEBUG) */
 
-_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description )
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description)
 {
        mali_osk_irq_object_t *irq_object;
-       unsigned long irq_flags = 0;
+       unsigned long irq_flags = 0; /* still used below when CONFIG_MALI_SHARED_INTERRUPTS is set */
 
 #if defined(CONFIG_MALI_SHARED_INTERRUPTS)
        irq_flags |= IRQF_SHARED;
@@ -111,7 +111,7 @@ _mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandl
 
        if (-1 == irqnum) {
                /* Probe for IRQ */
-               if ( (NULL != trigger_func) && (NULL != ack_func) ) {
+               if ((NULL != trigger_func) && (NULL != ack_func)) {
                        unsigned long probe_count = 3;
                        _mali_osk_errcode_t err;
                        int irq;
@@ -156,7 +156,7 @@ _mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandl
 #if 0
        /* Verify that the configured interrupt settings are working */
        if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
-               MALI_DEBUG_PRINT(2, ("Test of IRQ handler for core '%s' failed\n", description));
+               MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description));
                kfree(irq_object);
                return NULL;
        }
@@ -172,7 +172,7 @@ _mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandl
        return irq_object;
 }
 
-void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
 {
        mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
        free_irq(irq_object->irqnum, irq_object);
@@ -191,7 +191,7 @@ void _mali_osk_irq_term( _mali_osk_irq_t *irq )
  * Then we schedule the mali_core_irq_handler_bottom_half to run as high priority
  * work queue job.
  */
-static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id)   /* , struct pt_regs *regs*/
 {
        irqreturn_t ret = IRQ_NONE;
        mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
index c4d8a235bd4e28a3a5055bf75c1ba7458cb9694b..55d2a9408d17a94889196ca66a12b70b2230cd75 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -23,7 +23,7 @@
 static DEFINE_SPINLOCK(lock_tracking_lock);
 static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
 static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
-static const char * const lock_order_to_string(_mali_osk_lock_order_t order);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
 #endif /* LOCK_ORDER_CHECKING */
 
 void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
@@ -44,7 +44,7 @@ void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
 #ifdef LOCK_ORDER_CHECKING
        if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
                if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
-                       printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+                       pr_warn("%d: ERROR lock %p taken while holding a lock of a higher order.\n",
                               _mali_osk_get_tid(), checker);
                        dump_stack();
                }
@@ -95,11 +95,11 @@ static void dump_lock_tracking_list(void)
        l = lock_lookup_list;
 
        while (NULL != l) {
-               printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+               pr_warn(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
                l = l->next;
                MALI_DEBUG_ASSERT(n++ < 100);
        }
-       printk(" NULL\n");
+       pr_warn(" NULL\n");
 }
 
 static int tracking_list_length(void)
@@ -137,8 +137,8 @@ static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock,
                /* Traverse the locks taken and find the lock of the highest order.
                 * Since several threads may hold locks, each lock's owner must be
                 * checked so that locks not owned by this thread can be ignored. */
-               for(;;) {
-                       MALI_DEBUG_ASSERT_POINTER( l );
+               for (;;) {
+                       MALI_DEBUG_ASSERT_POINTER(l);
                        if (tid == l->owner && l->order >= highest_order_for_tid) {
                                highest_order_for_tid = l->order;
                                highest_order_lock = l;
@@ -160,15 +160,15 @@ static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock,
        ret = highest_order_for_tid < lock->order;
 
        if (!ret) {
-               printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+               pr_warn("Took lock of order %d (%s) while holding lock of order %d (%s)\n",
                       lock->order, lock_order_to_string(lock->order),
                       highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
                dump_lock_tracking_list();
        }
 
-       if (len+1 != tracking_list_length()) {
-               printk(KERN_ERR "************ lock: %p\n", lock);
-               printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+       if (len + 1 != tracking_list_length()) {
+               pr_warn("************ lock: %p\n", lock);
+               pr_warn("************ before: %d *** after: %d ****\n", len, tracking_list_length());
                dump_lock_tracking_list();
                MALI_DEBUG_ASSERT_POINTER(NULL);
        }
@@ -190,7 +190,7 @@ static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t t
        curr = lock_lookup_list;
 
        if (NULL == curr) {
-               printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+               pr_warn("Error: Lock tracking list was empty on call to remove_lock_from_log\n");
                dump_lock_tracking_list();
        }
 
@@ -215,9 +215,9 @@ static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t t
 
        lock->next = NULL;
 
-       if (len-1 != tracking_list_length()) {
-               printk(KERN_ERR "************ lock: %p\n", lock);
-               printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+       if (len - 1 != tracking_list_length()) {
+               pr_warn("************ lock: %p\n", lock);
+               pr_warn("************ before: %d *** after: %d ****\n", len, tracking_list_length());
                dump_lock_tracking_list();
                MALI_DEBUG_ASSERT_POINTER(NULL);
        }
@@ -225,7 +225,7 @@ static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t t
        spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
 }
 
-static const char * const lock_order_to_string(_mali_osk_lock_order_t order)
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order)
 {
        switch (order) {
        case _MALI_OSK_LOCK_ORDER_SESSIONS:
@@ -243,38 +243,44 @@ static const char * const lock_order_to_string(_mali_osk_lock_order_t order)
        case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
                return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
                break;
-       case _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL:
-               return "_MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL";
+       case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+               return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
                break;
-       case _MALI_OSK_LOCK_ORDER_GROUP:
-               return "_MALI_OSK_LOCK_ORDER_GROUP";
+       case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+               return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+               break;
+       case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+               return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
                break;
        case _MALI_OSK_LOCK_ORDER_SCHEDULER:
                return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
                break;
-       case _MALI_OSK_LOCK_ORDER_PM_CORE_STATE:
-               return "_MALI_OSK_LOCK_ORDER_PM_CORE_STATE";
+       case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+               return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
                break;
-       case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
-               return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+       case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+               return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
                break;
        case _MALI_OSK_LOCK_ORDER_PROFILING:
                return "_MALI_OSK_LOCK_ORDER_PROFILING";
                break;
-       case _MALI_OSK_LOCK_ORDER_L2_COUNTER:
-               return "_MALI_OSK_LOCK_ORDER_L2_COUNTER";
+       case _MALI_OSK_LOCK_ORDER_L2:
+               return "_MALI_OSK_LOCK_ORDER_L2";
+               break;
+       case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+               return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
                break;
        case _MALI_OSK_LOCK_ORDER_UTILIZATION:
                return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
                break;
-       case _MALI_OSK_LOCK_ORDER_PM_EXECUTE:
-               return "_MALI_OSK_LOCK_ORDER_PM_EXECUTE";
-               break;
        case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
                return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
                break;
+       case _MALI_OSK_LOCK_ORDER_PM_STATE:
+               return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+               break;
        default:
-               return "";
+               return "<UNKNOWN_LOCK_ORDER>";
        }
 }
 #endif /* LOCK_ORDER_CHECKING */
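
The lock-order checker in this file enforces a single invariant: a thread may only take a lock whose order is strictly higher than the highest order it already holds. The driver scans one global linked list filtered by thread id; a minimal user-space sketch of the same check (illustrative only; the per-thread stack and all names below are not part of the driver):

#include <assert.h>
#include <stdio.h>

/* Per-thread stack of the orders of currently held locks (sketch only;
 * the driver keeps a single global list and filters entries by tid). */
#define MAX_HELD 16
static __thread int held_orders[MAX_HELD];
static __thread int held_count;

/* Returns 1 and records the lock if taking 'order' keeps the invariant. */
static int lock_order_check_and_push(int order)
{
        if (held_count > 0 && held_orders[held_count - 1] >= order) {
                fprintf(stderr, "ERROR: order %d taken while holding order %d\n",
                        order, held_orders[held_count - 1]);
                return 0;
        }
        assert(held_count < MAX_HELD);
        held_orders[held_count++] = order;
        return 1;
}

static void lock_order_pop(void)
{
        assert(held_count > 0);
        held_count--;
}

int main(void)
{
        lock_order_check_and_push(1);   /* e.g. SESSIONS            */
        lock_order_check_and_push(5);   /* e.g. SCHEDULER: fine     */
        lock_order_check_and_push(3);   /* violation, gets reported */
        lock_order_pop();
        lock_order_pop();
        return 0;
}
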
index 145313cc58fbdb1acad185699775e9a6ecc6cd5d..17d5d18a3de074382b93154d2300ec265768ee52 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -22,8 +22,6 @@
 #include <linux/slab.h>
 
 #include "mali_osk_types.h"
-#include "mali_kernel_common.h"
-#include <linux/lockdep.h>
 
 #ifdef _cplusplus
 extern "C" {
@@ -83,7 +81,7 @@ extern "C" {
        void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
        void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
 
-       /** @brief This function can return a given lock's owner when DEBUG     is enabled. */
+       /** @brief This function can return a given lock's owner when DEBUG is enabled. */
        static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
        {
                return lock->owner;
@@ -104,13 +102,6 @@ extern "C" {
                        return NULL;
                }
                spin_lock_init(&lock->spinlock);
-
-/*
-               #ifdef CONFIG_PROVE_LOCKING
-         lockdep_skip_validate(&lock->spinlock.dep_map);
-               #endif
-*/
-               
                _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
                return lock;
        }
@@ -157,13 +148,6 @@ extern "C" {
 
                lock->flags = 0;
                spin_lock_init(&lock->spinlock);
-
-/*
-               #ifdef CONFIG_PROVE_LOCKING
-         lockdep_skip_validate(&lock->spinlock.dep_map);
-               #endif
-*/
-                               
                _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
                return lock;
        }
@@ -297,7 +281,7 @@ extern "C" {
                BUG_ON(NULL == lock);
 
                if (mutex_lock_interruptible(&lock->mutex)) {
-                       printk(KERN_WARNING "Mali: Can not lock mutex\n");
+                       pr_warn("Mali: Can not lock mutex\n");
                        err = _MALI_OSK_ERR_RESTARTSYSCALL;
                }
 
index f600b540d294496a9ef1861eb457d9bf4df46ffa..9471709f1b7c25e2dfa61d9127799a6266c44754 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_osk.h"
 #include "mali_ukk.h"
 
-void _mali_osk_mem_barrier( void )
+void _mali_osk_mem_barrier(void)
 {
        mb();
 }
 
-void _mali_osk_write_mem_barrier( void )
+void _mali_osk_write_mem_barrier(void)
 {
        wmb();
 }
 
-mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
 {
        return (mali_io_address)ioremap_nocache(phys, size);
 }
 
-void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
 {
-       iounmap((void*)virt);
+       iounmap((void *)virt);
 }
 
-_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
 {
 #if MALI_LICENSE_IS_GPL
        return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
@@ -50,39 +50,39 @@ _mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const ch
 #endif
 }
 
-void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
 {
 #if !MALI_LICENSE_IS_GPL
        release_mem_region(phys, size);
 #endif
 }
 
-void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
+void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val)
 {
-       __raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+       __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset);
 }
 
-u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
 {
-       return ioread32(((u8*)addr) + offset);
+       return ioread32(((u8 *)addr) + offset);
 }
 
-void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val)
 {
-       iowrite32(val, ((u8*)addr) + offset);
+       iowrite32(val, ((u8 *)addr) + offset);
 }
 
-void _mali_osk_cache_flushall( void )
+void _mali_osk_cache_flushall(void)
 {
        /** @note Cached memory is not currently supported in this implementation */
 }
 
-void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size)
 {
        _mali_osk_write_mem_barrier();
 }
 
-u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
 {
 #define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
        u32 retval = 0;
@@ -103,10 +103,10 @@ u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
                                size_to_copy = bytes_left_to_copy;
                        }
 
-                       bytes_left = copy_from_user(temp_buf, ((char*)src) + i, size_to_copy);
+                       bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy);
                        size_copied = size_to_copy - bytes_left;
 
-                       bytes_left = copy_to_user(((char*)dest) + i, temp_buf, size_copied);
+                       bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied);
                        size_copied -= bytes_left;
 
                        bytes_left_to_copy -= size_copied;
@@ -125,13 +125,22 @@ u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size)
 
 _mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
 {
+       void __user *src;
+       void __user *dst;
+       struct mali_session_data *session;
+
        MALI_DEBUG_ASSERT_POINTER(args);
 
-       if (NULL == args->ctx) {
+       session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+       if (NULL == session) {
                return _MALI_OSK_ERR_INVALID_ARGS;
        }
 
+       src = (void __user *)(uintptr_t)args->src;
+       dst = (void __user *)(uintptr_t)args->dest;
+
        /* Return number of bytes actually copied */
-       args->size = _mali_osk_mem_write_safe(args->dest, args->src, args->size);
+       args->size = _mali_osk_mem_write_safe(dst, src, args->size);
        return _MALI_OSK_ERR_OK;
 }
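
_mali_osk_mem_write_safe() keeps its original shape here: copy from user space into a 4096-byte kernel bounce buffer, then back out to user space, advancing block by block and stopping early when either copy comes up short. A user-space analogue of the loop (memcpy() stands in for copy_from_user()/copy_to_user(), which are kernel-only and can partially fail):

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 4096

/* Copy src to dest through a bounce buffer, at most BLOCK_SIZE bytes per
 * round. Returns the number of bytes copied; with plain memcpy() that is
 * always 'size', while copy_{from,to}_user() may end the loop early. */
static size_t write_safe(void *dest, const void *src, size_t size)
{
        static char temp[BLOCK_SIZE];
        size_t copied = 0;

        while (copied < size) {
                size_t chunk = size - copied;

                if (chunk > BLOCK_SIZE)
                        chunk = BLOCK_SIZE;

                memcpy(temp, (const char *)src + copied, chunk);
                memcpy((char *)dest + copied, temp, chunk);
                copied += chunk;
        }
        return copied;
}
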
index f59d12ef9ed43b83ef2fc830a78286bfe796b635..ecb64268fb88e63bbf607f5c7d643b6ab42f0f18 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <asm/uaccess.h>
 #include <linux/platform_device.h>
 #include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "mali_osk_mali.h"
 #include "mali_kernel_common.h" /* MALI_xxx macros */
 #include "mali_osk.h"           /* kernel side OS functions */
 #include "mali_kernel_linux.h"
 
+
+#ifdef CONFIG_MALI_DT
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the maximum number of resources we can have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the maximum number of resources with interrupts; they are
+ * the first 20 elements of the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * PP core start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_l2_LOCATION_END 22
+
+/**
+ * DMA unit location.
+ */
+#define MALI_OSK_RESOURCE_DMA_LOCATION 26
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+       {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+       {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+       {.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+       {.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+       {.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+       {.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+       {.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+       {.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+       {.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+       {.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+       {.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+       {.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+       {.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+       {.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+       {.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+       {.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+       {.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+       {.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
+       {.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+       {.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+       {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+       {.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+       {.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+       {.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+       {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+static int _mali_osk_get_compatible_name(const char **out_string)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       return of_property_read_string(node, "compatible", out_string);
+}
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+       mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE;
+       int i, pp_core_num = 0, l2_core_num = 0;
+       struct resource *res;
+       const char *compatible_name = NULL;
+
+       if (0 == _mali_osk_get_compatible_name(&compatible_name)) {
+               if (0 == strncmp(compatible_name, "arm,mali-450", strlen("arm,mali-450"))) {
+                       mali_is_450 = MALI_TRUE;
+                       MALI_DEBUG_PRINT(2, ("mali-450 device tree detected."));
+               } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) {
+                       mali_is_470 = MALI_TRUE;
+                       MALI_DEBUG_PRINT(2, ("mali-470 device tree detected."));
+               }
+       }
+
+       for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+               res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+               if (res) {
+                       mali_osk_resource_bank[i].irq = res->start;
+               } else {
+                       mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+               }
+       }
+
+       for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+               if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+                       pp_core_num++;
+               }
+       }
+
+       /* We have to divide by 2, because each PP core is counted twice (pp_core and pp_mmu_core). */
+       if (0 != pp_core_num % 2) {
+               MALI_DEBUG_PRINT(2, ("The value of pp core number isn't normal."));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       pp_core_num /= 2;
+
+       /**
+        * We can calculate the number of L2 cache cores from the number of
+        * PP cores and the device type (mali400/mali450/mali470).
+        */
+       l2_core_num = 1;
+       if (mali_is_450) {
+               if (pp_core_num > 4) {
+                       l2_core_num = 3;
+               } else if (pp_core_num <= 4) {
+                       l2_core_num = 2;
+               }
+       }
+
+       for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+               mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+       }
+
+       /* If device is not mali-450 type, we have to remove related resource from resource bank. */
+       if (!(mali_is_450 || mali_is_470)) {
+               for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+                       mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+               }
+       }
+
+       if (mali_is_470)
+               mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+
+       return _MALI_OSK_ERR_OK;
+}
+
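
The L2 sizing rule inside _mali_osk_resource_initialize() is compact enough to restate on its own (a sketch, not driver code): every part has at least one L2 cache, and a mali-450 has two with up to four PP cores and three with more:

#include <stdio.h>

/* Restates the L2 sizing rule from _mali_osk_resource_initialize():
 * non-450 parts (mali-400/470) keep one L2 cache; a mali-450 has two
 * with up to four PP cores and three beyond that. */
static int l2_cores_for(int pp_core_num, int is_mali450)
{
        if (!is_mali450)
                return 1;
        return (pp_core_num > 4) ? 3 : 2;
}

int main(void)
{
        printf("mali-450, 8 PP -> %d L2\n", l2_cores_for(8, 1)); /* 3 */
        printf("mali-400, 4 PP -> %d L2\n", l2_cores_for(4, 0)); /* 1 */
        return 0;
}
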
 _mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
 {
        int i;
 
+       if (NULL == mali_platform_device) {
+               return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+       }
+
+       /* Traverse all resources in the resource bank to find the matching one. */
+       for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+               if (mali_osk_resource_bank[i].base == addr) {
+                       if (NULL != res) {
+                               res->base = addr + _mali_osk_resource_base_address();
+                               res->description = mali_osk_resource_bank[i].description;
+                               res->irq = mali_osk_resource_bank[i].irq;
+                       }
+                       return _MALI_OSK_ERR_OK;
+               }
+       }
+
+       return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+       struct resource *reg_res = NULL;
+       uintptr_t ret = 0;
+
+       reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+
+       if (NULL != reg_res) {
+               ret = reg_res->start;
+       }
+
+       return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+       struct property *prop;
+       const __be32 *p;
+       int length = 0, i = 0;
+       u32 u;
+
+       MALI_DEBUG_PRINT(2, ("Get pmu config from device tree configuration.\n"));
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       if (!of_get_property(node, "pmu_domain_config", &length)) {
+               return;
+       }
+
+       if (array_size != length / sizeof(u32)) {
+               MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+               return;
+       }
+
+       of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+               domain_config_array[i] = (u16)u;
+               i++;
+       }
+
+       return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+       struct device_node *node = mali_platform_device->dev.of_node;
+       u32 switch_delay;
+
+       MALI_DEBUG_ASSERT(NULL != node);
+
+       if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+               return switch_delay;
+       } else {
+               MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+       int i;
+       uintptr_t phys_addr;
+
        if (NULL == mali_platform_device) {
                /* Not connected to a device */
                return _MALI_OSK_ERR_ITEM_NOT_FOUND;
        }
 
+       phys_addr = addr + _mali_osk_resource_base_address();
        for (i = 0; i < mali_platform_device->num_resources; i++) {
                if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
-                   mali_platform_device->resource[i].start == addr) {
+                   mali_platform_device->resource[i].start == phys_addr) {
                        if (NULL != res) {
-                               res->base = addr;
+                               res->base = phys_addr;
                                res->description = mali_platform_device->resource[i].name;
 
                                /* Any (optional) IRQ resource belonging to this resource will follow */
                                if ((i + 1) < mali_platform_device->num_resources &&
-                                   IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i+1]))) {
-                                       res->irq = mali_platform_device->resource[i+1].start;
+                                   IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
+                                       res->irq = mali_platform_device->resource[i + 1].start;
                                } else {
                                        res->irq = -1;
                                }
@@ -53,10 +283,10 @@ _mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
        return _MALI_OSK_ERR_ITEM_NOT_FOUND;
 }
 
-u32 _mali_osk_resource_base_address(void)
+uintptr_t _mali_osk_resource_base_address(void)
 {
-       u32 lowest_addr = 0xFFFFFFFF;
-       u32 ret = 0;
+       uintptr_t lowest_addr = (uintptr_t)(0 - 1);
+       uintptr_t ret = 0;
 
        if (NULL != mali_platform_device) {
                int i;
@@ -72,28 +302,47 @@ u32 _mali_osk_resource_base_address(void)
        return ret;
 }
 
-_mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data)
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+       _mali_osk_device_data data = { 0, };
+
+       MALI_DEBUG_PRINT(2, ("Get pmu config from platform device data.\n"));
+       if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+               /* Copy the custom customer power domain config */
+               _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+       }
+
+       return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+       _mali_osk_errcode_t err;
+       _mali_osk_device_data data = { 0, };
+
+       err = _mali_osk_device_data_get(&data);
+
+       if (_MALI_OSK_ERR_OK == err) {
+               return data.pmu_switch_delay;
+       }
+
+       return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
 {
        MALI_DEBUG_ASSERT_POINTER(data);
 
        if (NULL != mali_platform_device) {
-               struct mali_gpu_device_data* os_data = NULL;
+               struct mali_gpu_device_data *os_data = NULL;
 
-               os_data = (struct mali_gpu_device_data*)mali_platform_device->dev.platform_data;
+               os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
                if (NULL != os_data) {
                        /* Copy data from OS dependant struct to Mali neutral struct (identical!) */
-                       data->dedicated_mem_start = os_data->dedicated_mem_start;
-                       data->dedicated_mem_size = os_data->dedicated_mem_size;
-                       data->shared_mem_size = os_data->shared_mem_size;
-                       data->fb_start = os_data->fb_start;
-                       data->fb_size = os_data->fb_size;
-                       data->max_job_runtime = os_data->max_job_runtime;
-                       data->utilization_interval = os_data->utilization_interval;
-                       data->utilization_callback = os_data->utilization_callback;
-                       data->pmu_switch_delay = os_data->pmu_switch_delay;
-                       data->set_freq_callback = os_data->set_freq_callback;
-
-                       memcpy(data->pmu_domain_config, os_data->pmu_domain_config, sizeof(os_data->pmu_domain_config));
+                       BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
+                       _mali_osk_memcpy(data, os_data, sizeof(*os_data));
+
                        return _MALI_OSK_ERR_OK;
                }
        }
@@ -101,6 +350,20 @@ _mali_osk_errcode_t _mali_osk_device_data_get(struct _mali_osk_device_data *data
        return _MALI_OSK_ERR_ITEM_NOT_FOUND;
 }
 
+u32 _mali_osk_identify_gpu_resource(void)
+{
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+               /* Mali 450 */
+               return 0x450;
+
+       if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL))
+               /* Mali 470 */
+               return 0x470;
+
+       /* Mali 400 */
+       return 0x400;
+}
+
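
The probe order in _mali_osk_identify_gpu_resource() matters: only a Mali-450 has a second L2 cache resource, and a DLBU without that second L2 indicates a Mali-470; everything else is treated as a Mali-400. The same decision tree in isolation (the two flags below are stand-ins for successful _mali_osk_resource_find() probes):

#include <stdio.h>

/* Decision tree from _mali_osk_identify_gpu_resource(); the flags stand
 * in for the resource probes at MALI_OFFSET_L2_RESOURCE1 and
 * MALI_OFFSET_DLBU. */
static unsigned int identify_gpu(int has_l2_resource1, int has_dlbu)
{
        if (has_l2_resource1)
                return 0x450;   /* a second L2 only exists on Mali-450  */
        if (has_dlbu)
                return 0x470;   /* DLBU without a second L2 is Mali-470 */
        return 0x400;
}

int main(void)
{
        printf("0x%x\n", identify_gpu(0, 1)); /* prints 0x470 */
        return 0;
}
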
 mali_bool _mali_osk_shared_interrupts(void)
 {
        u32 irqs[128];
index 08d4ced5e19be3baa832c409b56b011b5e69944b..251bc5f12b7e1081ad6bfd89b7ab404064db01c7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_osk.h"
 #include <linux/bitops.h>
 
-u32 _mali_osk_clz( u32 input )
+u32 _mali_osk_clz(u32 input)
 {
-       return 32-fls(input);
+       return 32 - fls(input);
 }
 
-u32 _mali_osk_fls( u32 input )
+u32 _mali_osk_fls(u32 input)
 {
        return fls(input);
 }
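
_mali_osk_clz() relies on the identity clz(x) = 32 - fls(x), where the kernel's fls() returns the 1-based index of the most significant set bit and 0 for a zero input. A quick standalone check (with a portable stand-in for fls()):

#include <stdio.h>

/* Portable fls(): 1-based index of the highest set bit, 0 if x is 0. */
static unsigned int fls32(unsigned int x)
{
        unsigned int i = 0;

        while (x) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        /* clz(1) = 31, clz(0x80000000) = 0, clz(0) = 32 by this definition */
        unsigned int vals[] = { 1u, 0x80000000u, 0u };
        int i;

        for (i = 0; i < 3; i++)
                printf("clz(0x%08x) = %u\n", vals[i], 32 - fls32(vals[i]));
        return 0;
}
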
index 97742940d5a93691bfbc9468d044bbb782eec30b..1def3321d01ce23ee5574456f358626bc7fb6e90 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
-void inline *_mali_osk_calloc( u32 n, u32 size )
+void inline *_mali_osk_calloc(u32 n, u32 size)
 {
        return kcalloc(n, size, GFP_KERNEL);
 }
 
-void inline *_mali_osk_malloc( u32 size )
+void inline *_mali_osk_malloc(u32 size)
 {
        return kmalloc(size, GFP_KERNEL);
 }
 
-void inline _mali_osk_free( void *ptr )
+void inline _mali_osk_free(void *ptr)
 {
        kfree(ptr);
 }
 
-void inline *_mali_osk_valloc( u32 size )
+void inline *_mali_osk_valloc(u32 size)
 {
        return vmalloc(size);
 }
 
-void inline _mali_osk_vfree( void *ptr )
+void inline _mali_osk_vfree(void *ptr)
 {
        vfree(ptr);
 }
 
-void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+void inline *_mali_osk_memcpy(void *dst, const void *src, u32 len)
 {
        return memcpy(dst, src, len);
 }
 
-void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+void inline *_mali_osk_memset(void *s, u32 c, u32 n)
 {
        return memset(s, c, n);
 }
 
-mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated)
 {
        /* No need to prevent an out-of-memory dialogue appearing on Linux,
         * so we always return MALI_TRUE.
index 2721824fa38919ba95fe74734352be2c78a40c30..48781d9fc8afb25278f8848156bb7ba7b01f3166 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/module.h>
 #include "mali_osk.h"
-#include "mt_reg_base.h"
 
-extern void smi_dumpDebugMsg(void);
-extern int m4u_dump_debug_registers(void);;
+extern void dump_clk_state(void);
 
-void _mali_osk_dbgmsg( const char *fmt, ... )
+#if !defined(CONFIG_MALI_QUIET)
+void _mali_osk_dbgmsg(const char *fmt, ...)
 {
        va_list args;
        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
 }
+#endif /* !defined(CONFIG_MALI_QUIET) */
 
-u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
 {
        int res;
        va_list args;
@@ -43,36 +44,27 @@ u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
        return res;
 }
 
-#define CLK_CFG_0           (INFRA_BASE + 0x0040)
-#define VENCPLL_CON0        (DDRPHY_BASE+0x800)
-#define MMPLL_CON0          (APMIXEDSYS_BASE + 0x0230)
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...)
+{
+       va_list args;
+       char buf[512];
+
+       va_start(args, fmt);
+       vscnprintf(buf, 512, fmt, args);
+       seq_printf(print_ctx, "%s", buf);
+       va_end(args);
+}
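
_mali_osk_ctxprintf() formats into a bounded stack buffer with vscnprintf() and then emits the result through the seq_file; the buffer has to be passed as data (via "%s") rather than as a format string, since the formatted message may itself contain '%'. The same wrapper shape in user space (vsnprintf() standing in for the kernel's vscnprintf()):

#include <stdarg.h>
#include <stdio.h>

/* Format into a bounded buffer, then emit it as data, never as a format
 * string: the vscnprintf() + seq_printf(ctx, "%s", buf) pattern above. */
static void ctxprintf(FILE *ctx, const char *fmt, ...)
{
        char buf[512];
        va_list args;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        fprintf(ctx, "%s", buf);   /* "%s", not buf, on purpose */
}
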
 
 void _mali_osk_abort(void)
 {
-    int index;
-
        /* make a simple fault by dereferencing a NULL pointer */
        dump_stack();
-
-    for (index = 0; index < 5; index++)
-    {
-        MALI_DEBUG_PRINT(2, ("=== [MALI] PLL Dump %d ===\n", index));       
-        MALI_DEBUG_PRINT(2, ("CLK_CFG_0: 0x%08x\n", *((volatile unsigned int*)CLK_CFG_0)));
-        MALI_DEBUG_PRINT(2, ("VENCPLL_CON0: 0x%08x\n", *((volatile unsigned int*)VENCPLL_CON0)));
-        MALI_DEBUG_PRINT(2, ("MMPLL_CON0: 0x%08x\n", *((volatile unsigned int*)MMPLL_CON0)));
-
-        MALI_DEBUG_PRINT(2, ("=== [MALI] SMI Dump %d ===\n", index));
-        smi_dumpDebugMsg();
-
-        MALI_DEBUG_PRINT(2, ("=== [MALI] 8127 m4u not provide API? M4U Dump %d ===\n", index));
-        /*m4u_dump_debug_registers();*/
-    }
-
-       *(int *)0 = 0;
+       *(int *)0 = 0;
 }
 
 void _mali_osk_break(void)
 {
+       dump_clk_state();
        _mali_osk_abort();
 }
 
@@ -82,6 +74,12 @@ u32 _mali_osk_get_pid(void)
        return (u32)current->tgid;
 }
 
+char *_mali_osk_get_comm(void)
+{
+       return (char *)current->comm;
+}
+
 u32 _mali_osk_get_tid(void)
 {
        /* pid is actually identifying the thread on Linux */
index 0afcb76318cf032609e0de2815c1de1e92c12b90..eb838fd4c95cc8c3aee56f61a2e4458be119ac2a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -37,9 +37,9 @@ typedef struct _mali_osk_notification_wrapper_t_struct {
        _mali_osk_notification_t data;   /**< Notification data */
 } _mali_osk_notification_wrapper_t;
 
-_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void)
 {
-       _mali_osk_notification_queue_t *        result;
+       _mali_osk_notification_queue_t         *result;
 
        result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
        if (NULL == result) return NULL;
@@ -51,13 +51,13 @@ _mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
        return result;
 }
 
-_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
 {
        /* OPT Recycling of notification objects */
        _mali_osk_notification_wrapper_t *notification;
 
-       notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size,
-                      GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+       notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+                       GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
        if (NULL == notification) {
                MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
                return NULL;
@@ -67,7 +67,7 @@ _mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
        INIT_LIST_HEAD(&notification->list);
 
        if (0 != size) {
-               notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+               notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t);
        } else {
                notification->data.result_buffer = NULL;
        }
@@ -80,40 +80,40 @@ _mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
        return &(notification->data);
 }
 
-void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+void _mali_osk_notification_delete(_mali_osk_notification_t *object)
 {
        _mali_osk_notification_wrapper_t *notification;
-       MALI_DEBUG_ASSERT_POINTER( object );
+       MALI_DEBUG_ASSERT_POINTER(object);
 
-       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+       notification = container_of(object, _mali_osk_notification_wrapper_t, data);
 
        /* Free the container */
        kfree(notification);
 }
 
-void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue)
 {
        _mali_osk_notification_t *result;
-       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER(queue);
 
        while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
-               _mali_osk_notification_delete( result );
+               _mali_osk_notification_delete(result);
        }
 
        /* not much to do, just free the memory */
        kfree(queue);
 }
-void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object)
 {
 #if defined(MALI_UPPER_HALF_SCHEDULING)
        unsigned long irq_flags;
 #endif
 
        _mali_osk_notification_wrapper_t *notification;
-       MALI_DEBUG_ASSERT_POINTER( queue );
-       MALI_DEBUG_ASSERT_POINTER( object );
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_ASSERT_POINTER(object);
 
-       notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+       notification = container_of(object, _mali_osk_notification_wrapper_t, data);
 
 #if defined(MALI_UPPER_HALF_SCHEDULING)
        spin_lock_irqsave(&queue->mutex, irq_flags);
@@ -133,7 +133,7 @@ void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _
        wake_up(&queue->receive_queue);
 }
 
-_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
 {
 #if defined(MALI_UPPER_HALF_SCHEDULING)
        unsigned long irq_flags;
@@ -164,17 +164,17 @@ _mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification
        return ret;
 }
 
-_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
 {
        /* check input */
-       MALI_DEBUG_ASSERT_POINTER( queue );
-       MALI_DEBUG_ASSERT_POINTER( result );
+       MALI_DEBUG_ASSERT_POINTER(queue);
+       MALI_DEBUG_ASSERT_POINTER(result);
 
        /* default result */
        *result = NULL;
 
        if (wait_event_interruptible(queue->receive_queue,
-                                    _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
+                                    _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
                return _MALI_OSK_ERR_RESTARTSYSCALL;
        }
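
_mali_osk_notification_queue_receive() is the classic blocking-consumer shape: sleep until a non-blocking dequeue succeeds, and report an interrupted sleep as _MALI_OSK_ERR_RESTARTSYSCALL so the syscall can be restarted. A pthread analogue of the same shape (condition variable in place of the kernel wait queue; illustrative only):

#include <pthread.h>
#include <stddef.h>

/* Mutex + condvar analogue of the spinlock + wait-queue pair above. */
struct node { struct node *next; };

struct queue {
        pthread_mutex_t mutex;
        pthread_cond_t  nonempty;
        struct node    *head, *tail;
};

/* Non-blocking dequeue; returns the node or NULL. The caller holds the
 * mutex, as _mali_osk_notification_queue_dequeue() holds the spinlock. */
static struct node *try_dequeue(struct queue *q)
{
        struct node *n = q->head;

        if (n) {
                q->head = n->next;
                if (!q->head)
                        q->tail = NULL;
        }
        return n;
}

/* Blocking receive: sleep until try_dequeue() succeeds, the role played
 * by wait_event_interruptible() in the hunk above. */
static struct node *receive(struct queue *q)
{
        struct node *n;

        pthread_mutex_lock(&q->mutex);
        while (NULL == (n = try_dequeue(q)))
                pthread_cond_wait(&q->nonempty, &q->mutex);
        pthread_mutex_unlock(&q->mutex);
        return n;
}
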
 
index 16c1b5790752fbdbafead3bd616363e8ff1d4d6a..40e244ace140c1f300578270e9b58cdb97f8956f 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_osk.h"
 #include "mali_kernel_common.h"
 #include "mali_kernel_linux.h"
-#include "mali_pm.h"
-#include "platform_pmm.h"
-#include "mali_kernel_utilization.h"
-
-/// For MFG sub-system clock control API
-#include <mach/mt_clkmgr.h> 
-#include <mach/mt_cpufreq.h>
-#include <linux/spinlock.h>
-
-/* DVFS from 92 source , currently not supported on 8127 */
-/*#define MTK_MALI_DVFS*/ 
-
-static _mali_osk_timer_t* pm_timer;
-static _mali_osk_atomic_t mali_suspend_called;
-static _mali_osk_atomic_t mali_pm_ref_count;
-static _mali_osk_mutex_t* pm_lock;
-
-
-#if MALI_LICENSE_IS_GPL
-static struct workqueue_struct *mali_pm_wq = NULL;
-#endif
-static struct work_struct mali_pm_wq_work_handle;
-
-#ifdef MTK_MALI_DVFS
-static int g_current_freq_level = 0;
-static unsigned int loading_threshold = 80;
-static unsigned int CPI_level0_threshold = 111411; // 1.7 * 65536
-static unsigned int CPI_level1_threshold = 131072; // 2.0 * 65536
-
-static void g3d_dvfs_enable(void)
-{
-    int iDVFS_Level = 0;
-    unsigned long loading = gpu_get_current_utilization();// 0 ~ 100
-    unsigned long cpi = mali_utilization_bw_get_period();
-    
-    if(loading >= loading_threshold)
-    {
-        switch(g_current_freq_level)
-        {
-        case 0:
-            if (cpi <= CPI_level0_threshold)
-            {
-                iDVFS_Level = 1;
-            }
-            break;
-        case 1:
-            if (cpi <= CPI_level1_threshold)
-            {
-                iDVFS_Level = 1;
-            }
-            break;
-        }
-    }
-
-    mt_soc_dvfs(SOC_DVFS_TYPE_GPU_HP, iDVFS_Level);
-    g_current_freq_level = iDVFS_Level;
-}
-
-static void g3d_dvfs_disable(void)
-{
-    mt_soc_dvfs(SOC_DVFS_TYPE_GPU_HP, 0);
-}
-
-#endif
-
-static void mali_bottom_half_pm ( struct work_struct *work )
-{
-    _mali_osk_mutex_wait(pm_lock);     
-     
-    if((_mali_osk_atomic_read(&mali_pm_ref_count) == 0) &&
-       (_mali_osk_atomic_read(&mali_suspend_called) == 0))
-    {
-        mali_pm_runtime_suspend();
-        _mali_osk_atomic_inc(&mali_suspend_called);
-        mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);        
-    }
-     
-    _mali_osk_mutex_signal(pm_lock);
-}
-
-_mali_osk_errcode_t _mali_osk_pm_delete_callback_timer(void)
-{
-   _mali_osk_timer_del(pm_timer);
-#if MALI_LICENSE_IS_GPL
-   if (mali_pm_wq)
-   {
-       flush_workqueue(mali_pm_wq);
-   }
-#else
-   flush_scheduled_work();
-#endif
-   return _MALI_OSK_ERR_OK;
-}
-
-void _mali_pm_callback(void *arg)
-{
-#if MALI_LICENSE_IS_GPL
-    if (mali_pm_wq)
-    {
-        queue_work(mali_pm_wq, &mali_pm_wq_work_handle);
-    }
-    else
-    {
-        MALI_PRINTF(("mali_pm_wq is NULL !!!\n"));
-        mali_bottom_half_pm(NULL);
-    }
-#else
-    schedule_work(&mali_pm_wq_work_handle);
-#endif
-}
-
-void _mali_osk_pm_dev_enable(void)
-{
-       _mali_osk_atomic_init(&mali_pm_ref_count, 0);
-       _mali_osk_atomic_init(&mali_suspend_called, 0);
-       pm_timer = _mali_osk_timer_init();
-       _mali_osk_timer_setcallback(pm_timer, _mali_pm_callback, NULL); 
-       
-    pm_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);
-
-#if MALI_LICENSE_IS_GPL
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
-    mali_pm_wq = alloc_workqueue("mali_pm", WQ_UNBOUND, 0);
-#else
-    mali_pm_wq = create_workqueue("mali_pm");
-#endif
-    if(NULL == mali_pm_wq)
-    {
-        MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n"));
-    }
-#endif
-    INIT_WORK( &mali_pm_wq_work_handle, mali_bottom_half_pm );
-}
-
-void _mali_osk_pm_dev_disable(void)
-{
-#if MALI_LICENSE_IS_GPL
-    if (mali_pm_wq)
-    {
-        flush_workqueue(mali_pm_wq);
-        destroy_workqueue(mali_pm_wq);
-        mali_pm_wq = NULL;
-    }
-#else
-    flush_scheduled_work();
-#endif
-       _mali_osk_atomic_term(&mali_pm_ref_count);
-       _mali_osk_atomic_term(&mali_suspend_called);
-       _mali_osk_timer_term(pm_timer);
-       _mali_osk_mutex_term(pm_lock);
-}
 
 /* Can NOT run in atomic context */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
 {
 #ifdef CONFIG_PM_RUNTIME
        int err;
        MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
        err = pm_runtime_get_sync(&(mali_platform_device->dev));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
        pm_runtime_mark_last_busy(&(mali_platform_device->dev));
 #endif
-       if (0 > err)
-       {
+       if (0 > err) {
                MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
                return _MALI_OSK_ERR_FAULT;
        }
-       _mali_osk_atomic_inc(&mali_pm_ref_count);
-       MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
-#else /// CONFIG_PM_RUNTIME  
-
-    _mali_osk_pm_delete_callback_timer();
-       
-       _mali_osk_mutex_wait(pm_lock);
-       
-   mali_platform_power_mode_change(MALI_POWER_MODE_ON);
-#ifdef MTK_MALI_DVFS           
-   g3d_dvfs_enable();
-#endif         
-   if(_mali_osk_atomic_read(&mali_suspend_called))
-   {                   
-               mali_pm_runtime_resume();
-
-      _mali_osk_atomic_dec(&mali_suspend_called);
-       }
-       
-       _mali_osk_atomic_inc(&mali_pm_ref_count);       
-   
-    _mali_osk_mutex_signal(pm_lock);
-   
-   MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));              
-
 #endif
        return _MALI_OSK_ERR_OK;
 }
 
 /* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
 {
 #ifdef CONFIG_PM_RUNTIME
+       int err;
        MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
-       _mali_osk_atomic_dec(&mali_pm_ref_count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+       err = pm_runtime_get(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
        pm_runtime_mark_last_busy(&(mali_platform_device->dev));
-       pm_runtime_put_autosuspend(&(mali_platform_device->dev));
-#else
-       pm_runtime_put(&(mali_platform_device->dev));
 #endif
-       MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
-
-#else /// CONFIG_PM_RUNTIME
-       
-       if(_mali_osk_atomic_dec_return(&mali_pm_ref_count) == 0)
-       {
-               _mali_osk_timer_mod(pm_timer, _mali_osk_time_mstoticks(mali_pm_wq ? 15 : 3000));
+       if (0 > err && -EINPROGRESS != err) {
+               MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+               return _MALI_OSK_ERR_FAULT;
        }
-
-       MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
 #endif
+       return _MALI_OSK_ERR_OK;
 }
 
-/* Can run in atomic context */
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
-{
-#ifdef CONFIG_PM_RUNTIME
-       u32 ref;
-       MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
-       pm_runtime_get_noresume(&(mali_platform_device->dev));
-       ref = _mali_osk_atomic_read(&mali_pm_ref_count);
-       MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
-       return ref > 0 ? MALI_TRUE : MALI_FALSE;
-#else
-   _mali_osk_mutex_wait(pm_lock);     
-       return _mali_osk_atomic_read(&mali_suspend_called) == 0 ? MALI_TRUE : MALI_FALSE;
-       /*return MALI_TRUE;*/
-#endif
-}
 
 /* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+void _mali_osk_pm_dev_ref_put(void)
 {
 #ifdef CONFIG_PM_RUNTIME
        MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+       pm_runtime_mark_last_busy(&(mali_platform_device->dev));
        pm_runtime_put_autosuspend(&(mali_platform_device->dev));
 #else
        pm_runtime_put(&(mali_platform_device->dev));
 #endif
-       MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
-#else
-   _mali_osk_mutex_signal(pm_lock);
 #endif
 }
 
@@ -283,8 +81,3 @@ void _mali_osk_pm_dev_barrier(void)
        pm_runtime_barrier(&(mali_platform_device->dev));
 #endif
 }
-#ifdef MTK_MALI_DVFS
-module_param(loading_threshold, ulong, 0644);
-module_param(CPI_level0_threshold, ulong, 0644);
-module_param(CPI_level1_threshold, ulong, 0644);
-#endif
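
After this rewrite the PM hooks follow the stock runtime-PM discipline: every pm_runtime_get{_sync,}() is balanced by pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend(), so the GPU powers down only once the last reference is gone and the autosuspend delay expires. A toy refcount model of that contract (user space, illustrative; the autosuspend delay is elided):

#include <stdio.h>

/* Toy model of the runtime-PM contract: the device is powered while
 * refcount > 0 and is scheduled to suspend when it drops to zero. */
static int refcount;

static void ref_get_sync(void)  /* like _mali_osk_pm_dev_ref_get_sync() */
{
        if (refcount++ == 0)
                printf("resume device (synchronously)\n");
}

static void ref_put(void)       /* like _mali_osk_pm_dev_ref_put() */
{
        if (--refcount == 0)
                printf("arm autosuspend timer\n");
}

int main(void)
{
        ref_get_sync();  /* job submitted: power up    */
        ref_get_sync();  /* second job: no transition  */
        ref_put();
        ref_put();       /* last ref: schedule suspend */
        return 0;
}
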
index bb89be6bb4cccecc7b47e319af3812ae795463c7..51c150f586290d0949255082e3dd6d456db0525d 100644 (file)
@@ -1,14 +1,18 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * by a licensing agreement from ARM Limited.
  */
-
+#include <linux/hrtimer.h>
 #include <linux/module.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched.h>
 
 #include <mali_profiling_gator_api.h>
 #include "mali_kernel_common.h"
 #include "mali_linux_trace.h"
 #include "mali_gp.h"
 #include "mali_pp.h"
-#include "mali_pp_scheduler.h"
 #include "mali_l2_cache.h"
 #include "mali_user_settings_db.h"
+#include "mali_executor.h"
+#include "mali_memory_manager.h"
 
-_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100
+#define MALI_PROFILING_STREAM_HOLD_TIME 1000000         /* 1 ms */
+
+#define MALI_PROFILING_STREAM_BUFFER_SIZE       (1 << 12)
+#define MALI_PROFILING_STREAM_BUFFER_NUM        100
+
+/**
+ * Define the mali profiling stream struct.
+ */
+typedef struct mali_profiling_stream {
+       u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE];
+       u32 used_size;
+       struct list_head list;
+} mali_profiling_stream;
+
+typedef struct mali_profiling_stream_list {
+       spinlock_t spin_lock;
+       struct list_head free_list;
+       struct list_head queue_list;
+} mali_profiling_stream_list;
+
+static const char mali_name[] = "4xx";
+static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n";
+
+static u32 profiling_sample_rate = 0;
+static u32 first_sw_counter_index = 0;
+
+static mali_bool l2_cache_counter_if_enabled = MALI_FALSE;
+static u32 num_counters_enabled = 0;
+static u32 mem_counters_enabled = 0;
+
+static _mali_osk_atomic_t stream_fd_if_used;
+
+static wait_queue_head_t stream_fd_wait_queue;
+static mali_profiling_counter *global_mali_profiling_counters = NULL;
+static u32 num_global_mali_profiling_counters = 0;
+
+static mali_profiling_stream_list *global_mali_stream_list = NULL;
+static mali_profiling_stream *mali_counter_stream = NULL;
+static mali_profiling_stream *mali_core_activity_stream = NULL;
+static u64 mali_core_activity_stream_dequeue_time = 0;
+static spinlock_t mali_activity_lock;
+static u32 mali_activity_cores_num = 0;
+static struct hrtimer profiling_sampling_timer;
+
+const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS;
+const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS;
+
+static u32 current_profiling_pid = 0;
+
+static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list)
 {
-       if (MALI_TRUE == auto_start) {
-               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+       mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+       MALI_DEBUG_ASSERT_POINTER(profiling_stream_list);
+
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) {
+               list_del(&profiling_stream->list);
+               kfree(profiling_stream);
        }
 
-       return _MALI_OSK_ERR_OK;
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) {
+               list_del(&profiling_stream->list);
+               kfree(profiling_stream);
+       }
+
+       kfree(profiling_stream_list);
 }
 
-void _mali_osk_profiling_term(void)
+static void _mali_profiling_global_stream_list_free(void)
 {
-       /* Nothing to do */
+       mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+       unsigned long irq_flags;
+
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) {
+               profiling_stream->used_size = 0;
+               list_move(&profiling_stream->list, &global_mali_stream_list->free_list);
+       }
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
 }
 
-_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
+static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream)
 {
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
+       unsigned long irq_flags;
+       _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+
+       if (!list_empty(stream_list)) {
+               *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list);
+               list_del_init(&(*new_mali_profiling_stream)->list);
+       } else {
+               ret = _MALI_OSK_ERR_NOMEM;
+       }
+
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+
+       return ret;
 }
 
-_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
+static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream)
 {
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
+       unsigned long irq_flags;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       list_add_tail(&current_mali_profiling_stream->list, stream_list);
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void)
+{
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+       return list_empty(&global_mali_stream_list->queue_list);
+}
+
+static u32 _mali_profiling_global_stream_queue_list_next_size(void)
+{
+       unsigned long irq_flags;
+       u32 size = 0;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+       if (!list_empty(&global_mali_stream_list->queue_list)) {
+               mali_profiling_stream *next_mali_profiling_stream =
+                       list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list);
+               size = next_mali_profiling_stream->used_size;
+       }
+       spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+       return size;
 }
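
The stream plumbing above is a fixed-size buffer pool: writers move buffers from free_list to queue_list as they fill them, the reader drains queue_list and recycles buffers back to free_list, all under one spinlock. A compact pthread version of the same structure (a hand-rolled singly-linked list replaces the kernel's list_head and a mutex replaces the spinlock; note the sketch pushes LIFO where the driver keeps FIFO order):

#include <pthread.h>
#include <stddef.h>

/* Minimal two-list buffer pool, mirroring free_list/queue_list above. */
struct buf {
        struct buf *next;
        unsigned int used_size;
        unsigned char data[4096];
};

struct pool {
        pthread_mutex_t lock;   /* stands in for the spinlock */
        struct buf *free_list;
        struct buf *queue_list; /* filled buffers waiting for the reader */
};

/* Pop a buffer from a list head; NULL when the list is empty. */
static struct buf *pool_pop(struct pool *p, struct buf **list)
{
        struct buf *b;

        pthread_mutex_lock(&p->lock);
        b = *list;
        if (b)
                *list = b->next;
        pthread_mutex_unlock(&p->lock);
        return b;
}

/* Push a buffer onto a list head (LIFO here, FIFO in the driver). */
static void pool_push(struct pool *p, struct buf **list, struct buf *b)
{
        pthread_mutex_lock(&p->lock);
        b->next = *list;
        *list = b;
        pthread_mutex_unlock(&p->lock);
}
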
 
-u32 _mali_osk_profiling_get_count(void)
+/* The mali profiling stream file operation functions. */
+static ssize_t _mali_profiling_stream_read(
+       struct file *filp,
+       char __user *buffer,
+       size_t      size,
+       loff_t      *f_pos);
+
+static unsigned int  _mali_profiling_stream_poll(struct file *filp, poll_table *wait);
+
+static int  _mali_profiling_stream_release(struct inode *inode, struct file *filp);
+
+/* The profiling stream file operations structure. */
+static const struct file_operations mali_profiling_stream_fops = {
+       .release = _mali_profiling_stream_release,
+       .read    = _mali_profiling_stream_read,
+       .poll    = _mali_profiling_stream_poll,
+};
+
+static ssize_t _mali_profiling_stream_read(
+       struct file *filp,
+       char __user *buffer,
+       size_t      size,
+       loff_t      *f_pos)
 {
+       u32 copy_len = 0;
+       mali_profiling_stream *current_mali_profiling_stream;
+       u32 used_size;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       while (!_mali_profiling_global_stream_queue_list_if_empty()) {
+               used_size = _mali_profiling_global_stream_queue_list_next_size();
+               if (used_size <= ((u32)size - copy_len)) {
+                       current_mali_profiling_stream = NULL;
+                       _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list,
+                                       &current_mali_profiling_stream);
+                       MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream);
+                       if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) {
+                               current_mali_profiling_stream->used_size = 0;
+                               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+                               return -EFAULT;
+                       }
+                       copy_len += current_mali_profiling_stream->used_size;
+                       current_mali_profiling_stream->used_size = 0;
+                       _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+               } else {
+                       break;
+               }
+       }
+       return (ssize_t)copy_len;
+}
+
+static unsigned int  _mali_profiling_stream_poll(struct file *filp, poll_table *wait)
+{
+       poll_wait(filp, &stream_fd_wait_queue, wait);
+       if (!_mali_profiling_global_stream_queue_list_if_empty())
+               return POLLIN;
        return 0;
 }
 
-_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp)
 {
-       /* Nothing to do */
-       return _MALI_OSK_ERR_OK;
+       _mali_osk_atomic_init(&stream_fd_if_used, 0);
+       return 0;
+}
+
+/* Helper functions for control packets and stream data. */
+static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size)
+{
+       u32 i;
+
+       for (i = 0; i < sizeof(size); ++i)
+               buf[i] = (size >> 8 * i) & 0xFF;
+}
+
+static u32 _mali_profiling_get_packet_size(unsigned char *const buf)
+{
+       u32 i;
+       u32 size = 0;
+       for (i = 0; i < sizeof(size); ++i)
+               size |= (u32)buf[i] << 8 * i;
+       return size;
+}
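+/*
+ * Illustrative round trip for the two size helpers above: the packet size is
+ * stored as a 4-byte little-endian field, so after
+ *
+ *     _mali_profiling_set_packet_size(buf, 0x12345678);
+ *
+ * buf holds { 0x78, 0x56, 0x34, 0x12 } and
+ * _mali_profiling_get_packet_size(buf) returns 0x12345678 again.
+ */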
+
+static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size)
+{
+       u64 int_value = 0;
+       u8 shift = 0;
+       u8 byte_value = ~0;
+
+       while ((byte_value & 0x80) != 0) {
+               MALI_DEBUG_ASSERT((*pos) < packet_size);
+               byte_value = buf[*pos];
+               *pos += 1;
+               int_value |= (u32)(byte_value & 0x7f) << shift;
+               shift += 7;
+       }
+
+       if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) {
+               int_value |= -(1 << shift);
+       }
+
+       return int_value;
+}
+
+static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value)
+{
+       u32 add_bytes = 0;
+       int more = 1;
+       while (more) {
+               /* low order 7 bits of val */
+               char byte_value = value & 0x7f;
+               value >>= 7;
+
+               if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) {
+                       more = 0;
+               } else {
+                       byte_value |= 0x80;
+               }
+
+               MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+               buf[pos + add_bytes] = byte_value;
+               add_bytes++;
+       }
+
+       return add_bytes;
+}
+
+static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val)
+{
+       int add_bytes = 0;
+       int more = 1;
+       while (more) {
+               /* low order 7 bits of x */
+               char byte_value = val & 0x7f;
+               val >>= 7;
+
+               if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) {
+                       more = 0;
+               } else {
+                       byte_value |= 0x80;
+               }
+
+               MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+               buf[pos + add_bytes] = byte_value;
+               add_bytes++;
+       }
+
+       return add_bytes;
+}
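+/*
+ * The two pack helpers above and _mali_profiling_read_packet_int() implement
+ * signed LEB128: 7 payload bits per byte, bit 0x80 flags a continuation byte,
+ * and bit 0x40 of the last byte carries the sign. Illustrative encodings:
+ *
+ *     300 -> { 0xAC, 0x02 }        (0x2C | 0x80, then 0x02)
+ *     -1  -> { 0x7F }              (sign bit 0x40 set, no continuation)
+ *
+ * The reader reverses this, sign-extending from bit 0x40 of the final byte.
+ */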
+
+static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value)
+{
+       u32 add_size = STREAM_HEADER_SIZE;
+       MALI_DEBUG_ASSERT_POINTER(profiling_stream);
+       MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE);
+
+       profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE;
+
+       add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                             profiling_stream->used_size + add_size, current_time);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)0);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)key);
+       add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+                                            profiling_stream->used_size + add_size, (s32)counter_value);
+
+       _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1,
+                                       add_size - STREAM_HEADER_SIZE);
+
+       profiling_stream->used_size += add_size;
+}
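+/*
+ * Each counter sample written above is a self-describing packet. Assuming
+ * STREAM_HEADER_SIZE covers the type byte plus the 4-byte size field:
+ *
+ *     byte 0       STREAM_HEADER_COUNTER_VALUE
+ *     bytes 1..4   payload size, little-endian (header excluded)
+ *     payload      LEB128 fields: timestamp, 0, key, counter value
+ */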
+
+/* The callback function for the sampling timer. */
+static enum hrtimer_restart _mali_profiling_sampling_counters(struct hrtimer *timer)
+{
+       u32 counter_index;
+       s64 current_time;
+       MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters);
+       MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+       MALI_DEBUG_ASSERT(NULL == mali_counter_stream);
+       if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+                   &global_mali_stream_list->free_list, &mali_counter_stream)) {
+
+               MALI_DEBUG_ASSERT_POINTER(mali_counter_stream);
+               MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size);
+
+               /* Capture l2 cache counter values if enabled */
+               if (MALI_TRUE == l2_cache_counter_if_enabled) {
+                       int i, j = 0;
+                       _mali_profiling_l2_counter_values l2_counters_values;
+                       _mali_profiling_get_l2_counters(&l2_counters_values);
+
+                       for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) {
+                               if (0 == (j % 2))
+                                       _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0);
+                               else
+                                       _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1);
+                               j++;
+                       }
+               }
+
+               current_time = (s64)_mali_osk_boot_time_get_ns();
+
+               /* Add all enabled counter values into stream */
+               for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) {
+                       /* Memory, activity and filmstrip counters are not sampled here. */
+                       if (global_mali_profiling_counters[counter_index].enabled) {
+                               if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER &&
+                                    global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY)
+                                   || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) {
+
+                                       continue;
+                               }
+
+                               if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 &&
+                                   global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) {
+
+                                       u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value;
+
+                                       _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+                                                                          global_mali_profiling_counters[counter_index].current_counter_value - prev_val);
+
+                                       prev_val = global_mali_profiling_counters[counter_index].current_counter_value;
+
+                                       global_mali_profiling_counters[counter_index].prev_counter_value = prev_val;
+                               } else {
+
+                                       if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) {
+                                               u32 total_alloc_mem = _mali_ukk_report_memory_usage();
+                                               global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE;
+                                       }
+                                       _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+                                                                          global_mali_profiling_counters[counter_index].current_counter_value);
+                                       if (global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER)
+                                               global_mali_profiling_counters[counter_index].current_counter_value = 0;
+                               }
+                       }
+               }
+               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream);
+               mali_counter_stream = NULL;
+       } else {
+               MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+       }
+
+       wake_up_interruptible(&stream_fd_wait_queue);
+
+       /* Re-arm the sampling timer. */
+       if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+               hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate));
+               return HRTIMER_RESTART;
+       }
+       return HRTIMER_NORESTART;
+}
+
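+/*
+ * Core activity events are batched into a shared stream buffer below. The
+ * buffer is flushed to the reader when it has been held longer than
+ * MALI_PROFILING_STREAM_HOLD_TIME, when it cannot take another default-sized
+ * packet, or when all cores have gone idle.
+ */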
+static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&mali_activity_lock, irq_flags);
+       if (activity == 0)
+               mali_activity_cores_num--;
+       else
+               mali_activity_cores_num++;
+       spin_unlock_irqrestore(&mali_activity_lock, irq_flags);
+
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < num_global_mali_profiling_counters; i++) {
+                       if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+                               u64 current_time = _mali_osk_boot_time_get_ns();
+                               u32 add_size = STREAM_HEADER_SIZE;
+
+                               if (NULL != mali_core_activity_stream) {
+                                       if ((mali_core_activity_stream_dequeue_time + MALI_PROFILING_STREAM_HOLD_TIME < current_time) ||
+                                           (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE
+                                            - mali_core_activity_stream->used_size)) {
+                                               _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+                                               mali_core_activity_stream = NULL;
+                                               wake_up_interruptible(&stream_fd_wait_queue);
+                                       }
+                               }
+
+                               if (NULL == mali_core_activity_stream) {
+                                       if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+                                                   &global_mali_stream_list->free_list, &mali_core_activity_stream)) {
+                                               mali_core_activity_stream_dequeue_time = current_time;
+                                       } else {
+                                               MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+                                               wake_up_interruptible(&stream_fd_wait_queue);
+                                               break;
+                                       }
+                               }
+
+                               mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY;
+
+                               add_size += _mali_profiling_pack_long(mali_core_activity_stream->data,
+                                                                     MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity);
+                               add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+                                                                    MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid);
+
+                               _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1,
+                                                               add_size - STREAM_HEADER_SIZE);
+
+                               mali_core_activity_stream->used_size += add_size;
+
+                               if (0 == mali_activity_cores_num) {
+                                       _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+                                       mali_core_activity_stream = NULL;
+                                       wake_up_interruptible(&stream_fd_wait_queue);
+                               }
+
+                               break;
+                       }
+               }
+       }
+}
+
+static mali_bool _mali_profiling_global_counters_init(void)
+{
+       int core_id, counter_index, counter_number, counter_id;
+       u32 num_l2_cache_cores;
+       u32 num_pp_cores;
+       u32 num_gp_cores = 1;
+
+       MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters);
+       num_pp_cores = mali_pp_get_glob_num_pp_cores();
+       num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+       num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores
+                                            + MALI_PROFILING_SW_COUNTERS_NUM
+                                            + MALI_PROFILING_SPECIAL_COUNTERS_NUM
+                                            + MALI_PROFILING_MEM_COUNTERS_NUM;
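+       /* Illustrative sizing: with 1 GP core, 4 PP cores and 1 L2 cache this
+        * reserves 3 * 5 + 2 = 17 entries before the SW, special and memory
+        * counter blocks. */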
+       global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter));
+
+       if (NULL == global_mali_profiling_counters)
+               return MALI_FALSE;
+
+       counter_index = 0;
+       /* Vertex processor counters */
+       for (core_id = 0; core_id < num_gp_cores; core_id++) {
+               global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* Fragment processors' counters */
+       for (core_id = 0; core_id < num_pp_cores; core_id++) {
+               counter_index++;
+               global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* L2 Cache counters */
+       for (core_id = 0; core_id < num_l2_cache_cores; core_id++) {
+               for (counter_number = 0; counter_number < 2; counter_number++) {
+                       counter_index++;
+                       global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
+                       _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                          sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+               }
+       }
+
+       /* Now set up the software counter entries */
+       for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) {
+               counter_index++;
+
+               if (0 == first_sw_counter_index)
+                       first_sw_counter_index = counter_index;
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER);
+       }
+
+       /* Now set up the special counter entries */
+       for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) {
+
+               counter_index++;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+                                  mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]);
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+       }
+
+       /* Now set up the mem counter entries*/
+       for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) {
+
+               counter_index++;
+               _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+                                  sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+                                  mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]);
+
+               global_mali_profiling_counters[counter_index].counter_id = counter_id;
+       }
+
+       MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters);
+
+       return MALI_TRUE;
 }
 
-_mali_osk_errcode_t _mali_osk_profiling_clear(void)
+void _mali_profiling_notification_mem_counter(struct mali_session_data *session, u32 counter_id, u32 key, int enable)
 {
-       /* Nothing to do */
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL != session) {
+               _mali_osk_notification_t *notification;
+               _mali_osk_notification_queue_t *queue;
+
+               queue = session->ioctl_queue;
+               MALI_DEBUG_ASSERT(NULL != queue);
+
+               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER,
+                               sizeof(_mali_uk_annotate_profiling_mem_counter_s));
+
+               if (NULL != notification) {
+                       _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer;
+                       data->counter_id = counter_id;
+                       data->key = key;
+                       data->enable = enable;
+
+                       _mali_osk_notification_queue_send(queue, notification);
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+       }
+}
+
+void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable)
+{
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL != session) {
+               _mali_osk_notification_t *notification;
+               _mali_osk_notification_queue_t *queue;
+
+               queue = session->ioctl_queue;
+               MALI_DEBUG_ASSERT(NULL != queue);
+
+               notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE,
+                               sizeof(_mali_uk_annotate_profiling_enable_s));
+
+               if (NULL != notification) {
+                       _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer;
+                       data->sampling_rate = sampling_rate;
+                       data->enable = enable;
+
+                       _mali_osk_notification_queue_send(queue, notification);
+               } else {
+                       MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+               }
+       } else {
+               MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+       }
+}
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+       int i;
+       mali_profiling_stream *new_mali_profiling_stream = NULL;
+       mali_profiling_stream_list *new_mali_profiling_stream_list = NULL;
+       if (MALI_TRUE == auto_start) {
+               mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+       }
+
+       /* Init the global_mali_stream_list. */
+       MALI_DEBUG_ASSERT(NULL == global_mali_stream_list);
+       new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL);
+
+       if (NULL == new_mali_profiling_stream_list) {
+               return _MALI_OSK_ERR_NOMEM;
+       }
+
+       spin_lock_init(&new_mali_profiling_stream_list->spin_lock);
+       INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list);
+       INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list);
+
+       spin_lock_init(&mali_activity_lock);
+       mali_activity_cores_num = 0;
+
+       for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) {
+               new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL);
+               if (NULL == new_mali_profiling_stream) {
+                       _mali_profiling_stream_list_destory(new_mali_profiling_stream_list);
+                       return _MALI_OSK_ERR_NOMEM;
+               }
+
+               INIT_LIST_HEAD(&new_mali_profiling_stream->list);
+               new_mali_profiling_stream->used_size = 0;
+               list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list);
+       }
+
+       _mali_osk_atomic_init(&stream_fd_if_used, 0);
+       init_waitqueue_head(&stream_fd_wait_queue);
+
+       hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+       profiling_sampling_timer.function = _mali_profiling_sampling_counters;
+
+       global_mali_stream_list = new_mali_profiling_stream_list;
+
        return _MALI_OSK_ERR_OK;
 }
 
-mali_bool _mali_osk_profiling_is_recording(void)
+void _mali_osk_profiling_term(void)
 {
-       return MALI_FALSE;
+       if (0 != profiling_sample_rate) {
+               hrtimer_cancel(&profiling_sampling_timer);
+               profiling_sample_rate = 0;
+       }
+       _mali_osk_atomic_term(&stream_fd_if_used);
+
+       if (NULL != global_mali_profiling_counters) {
+               _mali_osk_free(global_mali_profiling_counters);
+               global_mali_profiling_counters = NULL;
+               num_global_mali_profiling_counters = 0;
+       }
+
+       if (NULL != global_mali_stream_list) {
+               _mali_profiling_stream_list_destory(global_mali_stream_list);
+               global_mali_stream_list = NULL;
+       }
 }
 
-mali_bool _mali_osk_profiling_have_recording(void)
+void _mali_osk_profiling_stop_sampling(u32 pid)
 {
-       return MALI_FALSE;
+       if (pid == current_profiling_pid) {
+
+               int i;
+               /* Reset all counter states when closing connection.*/
+               for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                       _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+                       global_mali_profiling_counters[i].enabled = 0;
+                       global_mali_profiling_counters[i].prev_counter_value = 0;
+                       global_mali_profiling_counters[i].current_counter_value = 0;
+               }
+               l2_cache_counter_if_enabled = MALI_FALSE;
+               num_counters_enabled = 0;
+               mem_counters_enabled = 0;
+               _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+               _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+               /* Delete sampling timer when closing connection. */
+               if (0 != profiling_sample_rate) {
+                       hrtimer_cancel(&profiling_sampling_timer);
+                       profiling_sample_rate = 0;
+               }
+               current_profiling_pid = 0;
+       }
+}
+
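+/*
+ * Judging from the decode below, event IDs pack several fields: bits 16-23
+ * select the channel (GPU, GP0, PP0..PP7) and bits 24-27 the event type
+ * (single/start/stop), which is shifted back up for comparison against the
+ * MALI_PROFILING_EVENT_TYPE_* values.
+ */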
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+       /*Record the freq & volt to global_mali_profiling_counters here. */
+       if (0 != profiling_sample_rate) {
+               u32 channel;
+               u32 state;
+               channel = (event_id >> 16) & 0xFF;
+               state = ((event_id >> 24) & 0xF) << 24;
+
+               switch (state) {
+               case MALI_PROFILING_EVENT_TYPE_SINGLE:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) {
+                               u32 reason = (event_id & 0xFFFF);
+                               if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) {
+                                       _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0);
+                                       _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1);
+                               }
+                       }
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_START:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1);
+                       } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+                                  (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+                               u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1);
+                       }
+                       break;
+               case MALI_PROFILING_EVENT_TYPE_STOP:
+                       if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0);
+                       } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+                                  (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+                               u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+                               _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0);
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4);
 }
 
 void _mali_osk_profiling_report_sw_counters(u32 *counters)
@@ -81,10 +795,17 @@ void _mali_osk_profiling_report_sw_counters(u32 *counters)
        trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
 }
 
-
-_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value)
 {
-       return _mali_osk_profiling_start(&args->limit);
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < num_global_mali_profiling_counters; i++) {
+                       if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+                               global_mali_profiling_counters[i].current_counter_value = value;
+                               break;
+                       }
+               }
+       }
 }
 
 _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
@@ -95,24 +816,277 @@ _mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s
        return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
 {
-       return _mali_osk_profiling_stop(&args->count);
-}
+       u32 *counters = (u32 *)(uintptr_t)args->counters;
 
-_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
-{
-       return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+       _mali_osk_profiling_report_sw_counters(counters);
+
+       if (NULL != global_mali_profiling_counters) {
+               int i;
+               for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i++) {
+                       if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) {
+                               global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i);
+                       }
+               }
+       }
+
+       return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args)
 {
-       return _mali_osk_profiling_clear();
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) {
+
+               s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops,
+                                         session,
+                                         O_RDONLY | O_CLOEXEC);
+
+               args->stream_fd = fd;
+               if (0 > fd) {
+                       _mali_osk_atomic_dec(&stream_fd_if_used);
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       } else {
+               _mali_osk_atomic_dec(&stream_fd_if_used);
+               args->stream_fd = -1;
+               return _MALI_OSK_ERR_BUSY;
+       }
+
+       return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args)
 {
-       _mali_osk_profiling_report_sw_counters(args->counters);
+       u32 control_packet_size;
+       u32 output_buffer_size;
+
+       struct  mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+       MALI_DEBUG_ASSERT_POINTER(session);
+
+       if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) {
+               MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n"));
+               return _MALI_OSK_ERR_FAULT;
+       }
+
+       control_packet_size = args->control_packet_size;
+       output_buffer_size = args->response_packet_size;
+
+       if (0 != control_packet_size) {
+               u8 control_type;
+               u8 *control_packet_data;
+               u8 *response_packet_data;
+               u32 version_length = sizeof(utgard_setup_version) - 1;
+
+               control_packet_data = (u8 *)(uintptr_t)args->control_packet_data;
+               MALI_DEBUG_ASSERT_POINTER(control_packet_data);
+               response_packet_data = (u8 *)(uintptr_t)args->response_packet_data;
+               MALI_DEBUG_ASSERT_POINTER(response_packet_data);
+
+               /* Decide whether to skip the Utgard setup version prefix. */
+               if (control_packet_size >= version_length) {
+                       if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) {
+                               if (control_packet_size == version_length) {
+                                       args->response_packet_size = 0;
+                                       return _MALI_OSK_ERR_OK;
+                               } else {
+                                       control_packet_data += version_length;
+                                       control_packet_size -= version_length;
+                               }
+                       }
+               }
+
+               current_profiling_pid = _mali_osk_get_pid();
+
+               control_type = control_packet_data[0];
+               switch (control_type) {
+               case PACKET_HEADER_COUNTERS_REQUEST: {
+                       int i;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Send supported counters */
+                       *response_packet_data = PACKET_HEADER_COUNTERS_ACK;
+                       args->response_packet_size = PACKET_HEADER_SIZE;
+
+                       for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                               u32 name_size = strlen(global_mali_profiling_counters[i].counter_name);
+
+                               if ((args->response_packet_size + name_size + 1) > output_buffer_size) {
+                                       MALI_PRINT_ERROR(("Response packet data is too large..\n"));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+
+                               memcpy(response_packet_data + args->response_packet_size,
+                                      global_mali_profiling_counters[i].counter_name, name_size + 1);
+
+                               args->response_packet_size += (name_size + 1);
+
+                               if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32)1);
+                               } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores());
+                               } else {
+                                       args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+                                                                     output_buffer_size, args->response_packet_size, (s32)-1);
+                               }
+                       }
+
+                       _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size);
+                       break;
+               }
+
+               case PACKET_HEADER_COUNTERS_ENABLE: {
+                       int i;
+                       u32 request_pos = PACKET_HEADER_SIZE;
+                       mali_bool sw_counter_if_enabled = MALI_FALSE;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Init all counter states before enable requested counters.*/
+                       for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                               _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+                               global_mali_profiling_counters[i].enabled = 0;
+                               global_mali_profiling_counters[i].prev_counter_value = 0;
+                               global_mali_profiling_counters[i].current_counter_value = 0;
+
+                               if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+                                   global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+                                       _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0);
+                               }
+                       }
+
+                       l2_cache_counter_if_enabled = MALI_FALSE;
+                       num_counters_enabled = 0;
+                       mem_counters_enabled = 0;
+                       _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+                       _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+                       _mali_profiling_notification_enable(session, 0, 0);
+
+                       /* Enable requested counters */
+                       while (request_pos < control_packet_size) {
+                               u32 begin = request_pos;
+                               u32 event;
+                               u32 key;
+
+                               while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') {
+                                       ++request_pos;
+                               }
+
+                               ++request_pos;
+                               event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+                               key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+                               for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+                                       u32 name_size = strlen((char *)(control_packet_data + begin));
+                                       if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) {
+                                               if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id
+                                                                              && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) {
+                                                       sw_counter_if_enabled = MALI_TRUE;
+                                                       _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+                                               }
+
+                                               if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) {
+                                                       _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+                                                       _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff);
+                                                       _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff);
+                                               }
+
+                                               if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+                                                   global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+                                                       _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id,
+                                                                       key, 1);
+                                                       mem_counters_enabled++;
+                                               }
+
+                                               global_mali_profiling_counters[i].counter_event = event;
+                                               global_mali_profiling_counters[i].key = key;
+                                               global_mali_profiling_counters[i].enabled = 1;
+
+                                               _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id,
+                                                                         global_mali_profiling_counters[i].counter_event);
+                                               num_counters_enabled++;
+                                               break;
+                                       }
+                               }
+
+                               if (i == num_global_mali_profiling_counters) {
+                                       MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type));
+                                       return _MALI_OSK_ERR_FAULT;
+                               }
+                       }
+
+                       if (PACKET_HEADER_SIZE <= output_buffer_size) {
+                               *response_packet_data = PACKET_HEADER_ACK;
+                               _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+                               args->response_packet_size = PACKET_HEADER_SIZE;
+                       } else {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       break;
+               }
+
+               case PACKET_HEADER_START_CAPTURE_VALUE: {
+                       u32 live_rate;
+                       u32 request_pos = PACKET_HEADER_SIZE;
+
+                       if (PACKET_HEADER_SIZE > control_packet_size ||
+                           control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+                               MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x, size 0x%x.\n", control_packet_data[0], control_packet_size));
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       /* Read sampling rate in nanoseconds and live rate, then start capture. */
+                       profiling_sample_rate = _mali_profiling_read_packet_int(control_packet_data,
+                                                &request_pos, control_packet_size);
+
+                       live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+                       if (PACKET_HEADER_SIZE <= output_buffer_size) {
+                               *response_packet_data = PACKET_HEADER_ACK;
+                               _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+                               args->response_packet_size = PACKET_HEADER_SIZE;
+                       } else {
+                               return _MALI_OSK_ERR_FAULT;
+                       }
+
+                       if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+                               _mali_profiling_global_stream_list_free();
+                               if (mem_counters_enabled > 0) {
+                                       _mali_profiling_notification_enable(session, profiling_sample_rate, 1);
+                               }
+                               hrtimer_start(&profiling_sampling_timer,
+                                             ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000),
+                                             HRTIMER_MODE_REL_PINNED);
+                       }
+
+                       break;
+               }
+               default:
+                       MALI_PRINT_ERROR(("Unsupported  profiling packet header type %u.\n", control_type));
+                       args->response_packet_size  = 0;
+                       return _MALI_OSK_ERR_FAULT;
+               }
+       } else {
+               _mali_osk_profiling_stop_sampling(current_profiling_pid);
+               _mali_profiling_notification_enable(session, 0, 0);
+       }
+
        return _MALI_OSK_ERR_OK;
 }
 
@@ -197,15 +1171,13 @@ int _mali_profiling_set_event(u32 counter_id, s32 event_id)
                }
        } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
                u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
-               struct mali_l2_cache_corel2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+               struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
 
                if (NULL != l2_cache_core) {
                        u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
-                       if (0 == counter_src) {
-                               mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id);
-                       } else {
-                               mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id);
-                       }
+                       mali_l2_cache_core_set_counter_src(l2_cache_core,
+                                                          counter_src, event_id);
+                       l2_cache_counter_if_enabled = MALI_TRUE;
                }
        } else {
                return 0; /* Failure, unknown event */
@@ -225,35 +1197,26 @@ int _mali_profiling_set_event(u32 counter_id, s32 event_id)
  */
 u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
 {
-       struct mali_l2_cache_core *l2_cache;
        u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
        u32 i;
-       u32 ret = 0;
 
        MALI_DEBUG_ASSERT(l2_cores_num <= 3);
 
        for (i = 0; i < l2_cores_num; i++) {
-               l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+               struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
 
                if (NULL == l2_cache) {
                        continue;
                }
 
-               if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache)) {
-                       /* It is now safe to access the L2 cache core in order to retrieve the counters */
-                       mali_l2_cache_core_get_counter_values(l2_cache,
-                                                             &values->cores[i].source0,
-                                                             &values->cores[i].value0,
-                                                             &values->cores[i].source1,
-                                                             &values->cores[i].value1);
-               } else {
-                       /* The core was not available, set the right bit in the mask. */
-                       ret |= (1 << i);
-               }
-               mali_l2_cache_unlock_power_state(l2_cache);
+               mali_l2_cache_core_get_counter_values(l2_cache,
+                                                     &values->cores[i].source0,
+                                                     &values->cores[i].value0,
+                                                     &values->cores[i].source1,
+                                                     &values->cores[i].value1);
        }
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -261,7 +1224,7 @@ u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
  */
 void _mali_profiling_control(u32 action, u32 value)
 {
-       switch(action) {
+       switch (action) {
        case FBDUMP_CONTROL_ENABLE:
                mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
                break;
@@ -275,7 +1238,7 @@ void _mali_profiling_control(u32 action, u32 value)
                mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
                break;
        default:
-               break;  /* Ignore unimplemented actions */
+               break;  /* Ignore unimplemented actions */
        }
 }
 
@@ -297,10 +1260,11 @@ void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *value
        values->mali_version_major = mali_kernel_core_get_gpu_major_version();
        values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
        values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-       values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
+       values->num_of_fp_cores = mali_executor_get_num_cores_total();
        values->num_of_vp_cores = 1;
 }
 
+
 EXPORT_SYMBOL(_mali_profiling_set_event);
 EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
 EXPORT_SYMBOL(_mali_profiling_control);
index c0d4f5d6dc627162ef9663879ac7604bc11f1ae9..6f9ee5ac48f51ad7680590ee90c44f396a4a5cb8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #include <asm/uaccess.h>
 #include <linux/platform_device.h>
-#include <linux/dmapool.h>
 #include <linux/gfp.h>
 #include <linux/hardirq.h>
 
+
 #include "mali_osk_types.h"
 #include "mali_kernel_linux.h"
 
 #define MALI_STATIC_INLINE static inline
 #define MALI_NON_STATIC_INLINE inline
 
-typedef struct dma_pool * mali_dma_pool;
-
-
-MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
-{
-       return dma_pool_create("mali-dma", &mali_platform_device->dev, size, alignment, boundary);
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
-{
-       dma_pool_destroy(pool);
-}
-
-MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, u32 *phys_addr)
-{
-       return dma_pool_alloc(pool, GFP_KERNEL, phys_addr);
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void* virt_addr, u32 phys_addr)
-{
-       dma_pool_free(pool, virt_addr, phys_addr);
-}
+typedef struct dma_pool *mali_dma_pool;
 
+typedef u32 mali_dma_addr;
 
 #if MALI_ENABLE_CPU_CYCLES
 /* Reads out the clock cycle performance counter of the current cpu.
@@ -69,7 +49,7 @@ static inline unsigned int mali_get_cpu_cyclecount(void)
 {
        unsigned int value;
        /* Reading the CCNT Register - CPU clock counter */
-       asm volatile ("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+       asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
        return value;
 }
 
index 3a0b354c95837e89e27fd2d7962767a0e2ce2adf..fdaf0393b48f0a68a87f90af78a5f8f73988dd90 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <linux/time.h>
 #include <asm/delay.h>
 
-int    _mali_osk_time_after( u32 ticka, u32 tickb )
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
 {
-       return time_after((unsigned long)ticka, (unsigned long)tickb);
+       return time_after_eq(ticka, tickb) ?
+              MALI_TRUE : MALI_FALSE;
 }
 
-u32    _mali_osk_time_mstoticks( u32 ms )
+unsigned long _mali_osk_time_mstoticks(u32 ms)
 {
        return msecs_to_jiffies(ms);
 }
 
-u32    _mali_osk_time_tickstoms( u32 ticks )
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
 {
        return jiffies_to_msecs(ticks);
 }
 
-u32    _mali_osk_time_tickcount( void )
+unsigned long _mali_osk_time_tickcount(void)
 {
        return jiffies;
 }
 
-void _mali_osk_time_ubusydelay( u32 usecs )
+void _mali_osk_time_ubusydelay(u32 usecs)
 {
        udelay(usecs);
 }
 
-u64 _mali_osk_time_get_ns( void )
+u64 _mali_osk_time_get_ns(void)
 {
        struct timespec tsval;
        getnstimeofday(&tsval);
        return (u64)timespec_to_ns(&tsval);
 }
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+       struct timespec tsval;
+       get_monotonic_boottime(&tsval);
+       return (u64)timespec_to_ns(&tsval);
+}
index e1f0df75f5cf1e27401829c22ac16e8d65b7ae55..1079af13f6e3f27bbc41abb76c44a9ab107c24f7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -26,50 +26,50 @@ typedef void (*timer_timeout_function_t)(unsigned long);
 
 _mali_osk_timer_t *_mali_osk_timer_init(void)
 {
-       _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+       _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
        if (NULL != t) init_timer(&t->timer);
        return t;
 }
 
-void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        tim->timer.expires = jiffies + ticks_to_expire;
        add_timer(&(tim->timer));
 }
 
-void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 ticks_to_expire)
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        mod_timer(&(tim->timer), jiffies + ticks_to_expire);
 }
 
-void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+void _mali_osk_timer_del(_mali_osk_timer_t *tim)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        del_timer_sync(&(tim->timer));
 }
 
-void _mali_osk_timer_del_async( _mali_osk_timer_t *tim )
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        del_timer(&(tim->timer));
 }
 
-mali_bool _mali_osk_timer_pending( _mali_osk_timer_t *tim )
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        return 1 == timer_pending(&(tim->timer));
 }
 
-void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        tim->timer.data = (unsigned long)data;
        tim->timer.function = (timer_timeout_function_t)callback;
 }
 
-void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+void _mali_osk_timer_term(_mali_osk_timer_t *tim)
 {
        MALI_DEBUG_ASSERT_POINTER(tim);
        kfree(tim);
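
A minimal sketch of the timer lifecycle exercised by the functions above; the
example_* names are hypothetical:

    static void example_timeout_cb(void *data); /* hypothetical callback */

    static void example_timer_usage(void)
    {
            _mali_osk_timer_t *t = _mali_osk_timer_init();
            if (NULL == t) return;
            _mali_osk_timer_setcallback(t, example_timeout_cb, NULL);
            _mali_osk_timer_add(t, _mali_osk_time_mstoticks(100));
            /* ... */
            _mali_osk_timer_del(t);  /* del_timer_sync: waits for a running callback */
            _mali_osk_timer_term(t); /* timer must be stopped before freeing */
    }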
index fe2cc8bbe09ad53d5cdb009a13d4f8efa0e9bcc5..696681484af2b4efd6b9f49ab204d08bf0b065fd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -24,9 +24,9 @@ struct _mali_osk_wait_queue_t_struct {
        wait_queue_head_t wait_queue;
 };
 
-_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
 {
-       _mali_osk_wait_queue_t* ret = NULL;
+       _mali_osk_wait_queue_t *ret = NULL;
 
        ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
 
@@ -40,23 +40,23 @@ _mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void )
        return ret;
 }
 
-void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data )
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
 {
-       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER(queue);
        MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
        wait_event(queue->wait_queue, condition(data));
 }
 
-void _mali_osk_wait_queue_wait_event_timeout( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void *), void *data, u32 timeout )
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
 {
-       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER(queue);
        MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
        wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
 }
 
-void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
 {
-       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER(queue);
 
        /* if queue is empty, don't attempt to wake up its elements */
        if (!waitqueue_active(&queue->wait_queue)) return;
@@ -68,10 +68,10 @@ void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue )
        MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
 }
 
-void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue )
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
 {
        /* Parameter validation  */
-       MALI_DEBUG_ASSERT_POINTER( queue );
+       MALI_DEBUG_ASSERT_POINTER(queue);
 
        /* Linux requires no explicit termination of wait queues */
        kfree(queue);
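
A minimal sketch of the wait-queue API above, assuming a consumer that blocks
on an int flag; the example_* names are hypothetical:

    static mali_bool example_cond(void *data)
    {
            return (0 != *(int *)data) ? MALI_TRUE : MALI_FALSE;
    }

    static void example_wait(int *flag)
    {
            _mali_osk_wait_queue_t *q = _mali_osk_wait_queue_init();
            if (NULL == q) return;
            /* Blocks for at most 1000 ms or until *flag becomes non-zero. */
            _mali_osk_wait_queue_wait_event_timeout(q, example_cond, flag, 1000);
            _mali_osk_wait_queue_term(q);
    }

A producer on another thread would set *flag and then call
_mali_osk_wait_queue_wake_up(q).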
index 27babb4de7413a792acb1b04f16e5802d2e07698..eaf5823d3dd68182cc56fd06f85f8f61af4aa8b2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -13,7 +13,7 @@
  * Implementation of the OS abstraction layer for the kernel device driver
  */
 
-#include <linux/slab.h>        /* For memory allocation */
+#include <linux/slab.h> /* For memory allocation */
 #include <linux/workqueue.h>
 #include <linux/version.h>
 #include <linux/sched.h>
@@ -37,8 +37,8 @@ typedef struct _mali_osk_wq_delayed_work_s {
 } mali_osk_wq_delayed_work_object_t;
 
 #if MALI_LICENSE_IS_GPL
-struct workqueue_struct *mali_wq_normal = NULL;
-struct workqueue_struct *mali_wq_high = NULL;
+static struct workqueue_struct *mali_wq_normal = NULL;
+static struct workqueue_struct *mali_wq_high = NULL;
 #endif
 
 static void _mali_osk_wq_work_func(struct work_struct *work);
@@ -49,7 +49,7 @@ _mali_osk_errcode_t _mali_osk_wq_init(void)
        MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
        MALI_DEBUG_ASSERT(NULL == mali_wq_high);
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
        mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
        mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0);
 #else
@@ -101,7 +101,7 @@ void _mali_osk_wq_term(void)
 #endif
 }
 
-_mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handler, void *data )
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data)
 {
        mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
 
@@ -111,12 +111,12 @@ _mali_osk_wq_work_t *_mali_osk_wq_create_work( _mali_osk_wq_work_handler_t handl
        work->data = data;
        work->high_pri = MALI_FALSE;
 
-       INIT_WORK( &work->work_handle, _mali_osk_wq_work_func);
+       INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
 
        return work;
 }
 
-_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri( _mali_osk_wq_work_handler_t handler, void *data )
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data)
 {
        mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
 
@@ -126,25 +126,25 @@ _mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri( _mali_osk_wq_work_handle
        work->data = data;
        work->high_pri = MALI_TRUE;
 
-       INIT_WORK( &work->work_handle, _mali_osk_wq_work_func );
+       INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
 
        return work;
 }
 
-void _mali_osk_wq_delete_work( _mali_osk_wq_work_t *work )
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)
 {
        mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
        _mali_osk_wq_flush();
        kfree(work_object);
 }
 
-void _mali_osk_wq_delete_work_nonflush( _mali_osk_wq_work_t *work )
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)
 {
        mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
        kfree(work_object);
 }
 
-void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work )
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)
 {
        mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
 #if MALI_LICENSE_IS_GPL
@@ -154,7 +154,7 @@ void _mali_osk_wq_schedule_work( _mali_osk_wq_work_t *work )
 #endif
 }
 
-void _mali_osk_wq_schedule_work_high_pri( _mali_osk_wq_work_t *work )
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
 {
        mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
 #if MALI_LICENSE_IS_GPL
@@ -164,7 +164,7 @@ void _mali_osk_wq_schedule_work_high_pri( _mali_osk_wq_work_t *work )
 #endif
 }
 
-static void _mali_osk_wq_work_func( struct work_struct *work )
+static void _mali_osk_wq_work_func(struct work_struct *work)
 {
        mali_osk_wq_work_object_t *work_object;
 
@@ -172,7 +172,12 @@ static void _mali_osk_wq_work_func( struct work_struct *work )
 
 #if MALI_LICENSE_IS_GPL
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
-       /* We want higher priority than the Dynamic Priority, setting it to the lowest of the RT priorities */
+       /* We want the highest dynamic priority for this thread so that jobs
+       ** depending on it can be scheduled in time. Without this, the thread
+       ** might sometimes have to wait for user-mode threads to use up their
+       ** round-robin timeslices, causing a *bubble* in the Mali pipeline.
+       ** Newer kernels provide a high-priority workqueue implementation, so
+       ** this is only needed on older kernels.
+       */
        if (MALI_TRUE == work_object->high_pri) {
                set_user_nice(current, -19);
        }
@@ -182,7 +187,7 @@ static void _mali_osk_wq_work_func( struct work_struct *work )
        work_object->handler(work_object->data);
 }
 
-static void _mali_osk_wq_delayed_work_func( struct work_struct *work )
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
 {
        mali_osk_wq_delayed_work_object_t *work_object;
 
@@ -190,7 +195,7 @@ static void _mali_osk_wq_delayed_work_func( struct work_struct *work )
        work_object->handler(work_object->data);
 }
 
-mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work( _mali_osk_wq_work_handler_t handler, void *data)
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
 {
        mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
 
@@ -204,25 +209,25 @@ mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work( _mali_osk_w
        return work;
 }
 
-void _mali_osk_wq_delayed_delete_work_nonflush( _mali_osk_wq_delayed_work_t *work )
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
 {
        mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
        kfree(work_object);
 }
 
-void _mali_osk_wq_delayed_cancel_work_async( _mali_osk_wq_delayed_work_t *work )
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
 {
        mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
        cancel_delayed_work(&work_object->work);
 }
 
-void _mali_osk_wq_delayed_cancel_work_sync( _mali_osk_wq_delayed_work_t *work )
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
 {
        mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
        cancel_delayed_work_sync(&work_object->work);
 }
 
-void _mali_osk_wq_delayed_schedule_work( _mali_osk_wq_delayed_work_t *work, u32 delay )
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
 {
        mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
 
index 7ad03de8b88642d842688b175ace07d92ee95c9f..8b762d92e3be272d4de67496aecf3c3602eaa76d 100644 (file)
@@ -1,7 +1,7 @@
 /**
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * @file mali_pmu_power_up_down.c
  */
 
-#include <linux/version.h>
-#include <linux/sched.h>
 #include <linux/module.h>
-#include "mali_osk.h"
-#include "mali_kernel_common.h"
-#include "mali_pmu.h"
-#include "mali_pp_scheduler.h"
-#include "linux/mali/mali_utgard.h"
-
-/* Mali PMU power up/down APIs */
-
-int mali_pmu_powerup(void)
-{
-       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
-       MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       if (NULL == pmu) {
-               return -ENXIO;
-       }
-
-       if (_MALI_OSK_ERR_OK != mali_pmu_power_up_all(pmu)) {
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerup);
-
-int mali_pmu_powerdown(void)
-{
-       struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
-       MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
-
-       MALI_DEBUG_ASSERT_POINTER(pmu);
-       if (NULL == pmu) {
-               return -ENXIO;
-       }
-
-       if (_MALI_OSK_ERR_OK != mali_pmu_power_down_all(pmu)) {
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerdown);
+#include "mali_executor.h"
 
 int mali_perf_set_num_pp_cores(unsigned int num_cores)
 {
-       return mali_pp_scheduler_set_perf_level(num_cores, MALI_FALSE);
+       return mali_executor_set_perf_level(num_cores, MALI_FALSE);
 }
 
 EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
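
The PMU power up/down exports are removed above; only the performance-level
export remains, now delegating to the executor. A hypothetical platform/DVFS
caller might look like this:

    static void example_limit_pp_cores(unsigned int cores)
    {
            if (0 != mali_perf_set_num_pp_cores(cores))
                    printk(KERN_WARNING "mali: failed to set PP core count\n");
    }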
index 0164e8135f67d92770aae7c6cdc4bfda19d182ab..1e821cd83d122be370a91fa6bd8463d1ce2f7a19 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012 ARM Limited
+ * (C) COPYRIGHT 2012, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 9736b4af4c4bae28ed8ebcc9f07121cd4f320923..b3cd58c16db947e51fa3617d86edafc7841e5ea6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 90dce622f4e0d3ef716a796bb4e60617ed1de9b5..001cc014725769586aa8c230a13f4c3ffb78dce7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -32,14 +32,14 @@ typedef enum mali_profiling_state {
 
 static _mali_osk_mutex_t *lock = NULL;
 static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
-static mali_profiling_entry* profile_entries = NULL;
+static mali_profiling_entry *profile_entries = NULL;
 static _mali_osk_atomic_t profile_insert_index;
 static u32 profile_mask = 0;
 
 static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
 
 void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned
-                               int d2, unsigned int d3, unsigned int d4))
+                              int d2, unsigned int d3, unsigned int d4))
 {
        add_event(event_id, d0, d1, d2, d3, d4);
 }
@@ -89,7 +89,7 @@ void _mali_internal_profiling_term(void)
        }
 }
 
-_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit)
 {
        _mali_osk_errcode_t ret;
        mali_profiling_entry *new_profile_entries;
@@ -104,6 +104,7 @@ _mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
        new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
 
        if (NULL == new_profile_entries) {
+               _mali_osk_mutex_signal(lock);
                _mali_osk_vfree(new_profile_entries);
                return _MALI_OSK_ERR_NOMEM;
        }
@@ -160,12 +161,12 @@ static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32
        /* If event is "leave API function", add current memory usage to the event
         * as data point 4.  This is used in timeline profiling to indicate how
         * much memory was used when leaving a function. */
-       if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+       if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
                profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
        }
 }
 
-_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
 {
        _mali_osk_mutex_wait(lock);
 
@@ -203,7 +204,7 @@ u32 _mali_internal_profiling_get_count(void)
        return retval;
 }
 
-_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
 {
        u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
 
@@ -220,7 +221,7 @@ _mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp
                        return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
                }
 
-               if(index >= raw_index) {
+               if (index >= raw_index) {
                        _mali_osk_mutex_signal(lock);
                        return _MALI_OSK_ERR_FAULT;
                }
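
Earlier in this file's diff, _mali_internal_profiling_start gains a
_mali_osk_mutex_signal(lock) on its out-of-memory path, fixing a lock leak.
In sketch form (names taken from the code above):

    _mali_osk_mutex_wait(lock);
    /* ... */
    if (NULL == new_profile_entries) {
            _mali_osk_mutex_signal(lock); /* release before the early return */
            return _MALI_OSK_ERR_NOMEM;
    }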
index 07ffbb5026a2c6c1ea671754e06bcb40cbfb910d..5a8b0136a4111c1db4bd7a7614e9d1823932184e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -23,10 +23,10 @@ void _mali_internal_profiling_term(void);
 mali_bool _mali_internal_profiling_is_recording(void);
 mali_bool _mali_internal_profiling_have_recording(void);
 _mali_osk_errcode_t _mali_internal_profiling_clear(void);
-_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
 u32 _mali_internal_profiling_get_count(void);
-int _mali_internal_profiling_stop(u32 * count);
-int _mali_internal_profiling_start(u32 * limit);
+int _mali_internal_profiling_stop(u32 *count);
+int _mali_internal_profiling_start(u32 *limit);
 
 #ifdef __cplusplus
 }
index 190386493299b7321e08edbe899f81110ac1f30b..2a609e6ea7d187d5db134157766a647241688d42 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -13,6 +13,7 @@
 #include "mali_osk.h"
 #include "mali_kernel_common.h"
 #include "mali_timeline.h"
+#include "mali_executor.h"
 
 #include <linux/file.h>
 #include <linux/seq_file.h>
@@ -21,6 +22,7 @@
 struct mali_sync_pt {
        struct sync_pt         sync_pt;
        struct mali_sync_flag *flag;
+       struct sync_timeline *sync_tl;  /**< Sync timeline this pt is connected to. */
 };
 
 /**
@@ -34,11 +36,25 @@ struct mali_sync_flag {
        struct kref           refcount; /**< Reference count. */
 };
 
+/**
+ * A Mali sync timeline connects a mali timeline to a sync_timeline, so that
+ * when a fence times out we can print more detailed mali timeline system info.
+ */
+struct mali_sync_timeline_container {
+       struct sync_timeline sync_timeline;
+       struct mali_timeline *timeline;
+};
+
 MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
 {
        return container_of(pt, struct mali_sync_pt, sync_pt);
 }
 
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+{
+       return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
 static struct sync_pt *timeline_dup(struct sync_pt *pt)
 {
        struct mali_sync_pt *mpt, *new_mpt;
@@ -47,13 +63,14 @@ static struct sync_pt *timeline_dup(struct sync_pt *pt)
        MALI_DEBUG_ASSERT_POINTER(pt);
        mpt = to_mali_sync_pt(pt);
 
-       new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+       new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
        if (NULL == new_pt) return NULL;
 
        new_mpt = to_mali_sync_pt(new_pt);
 
        mali_sync_flag_get(mpt->flag);
        new_mpt->flag = mpt->flag;
+       new_mpt->sync_tl = mpt->sync_tl;
 
        return new_pt;
 }
@@ -85,7 +102,7 @@ static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
        MALI_DEBUG_ASSERT_POINTER(mptb->flag);
 
        a = mpta->flag->point;
-       b = mpta->flag->point;
+       b = mptb->flag->point;
 
        if (a == b) return 0;
 
@@ -104,9 +121,28 @@ static void timeline_free_pt(struct sync_pt *pt)
 
 static void timeline_release(struct sync_timeline *sync_timeline)
 {
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_timeline);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       /* the always-signaled timeline does not have a mali container */
+       if (mali_tl) {
+               if (NULL != mali_tl->spinlock) {
+                       mali_spinlock_reentrant_term(mali_tl->spinlock);
+               }
+               _mali_osk_free(mali_tl);
+       }
+
        module_put(THIS_MODULE);
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
 static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
 {
        struct mali_sync_pt *mpt;
@@ -115,11 +151,112 @@ static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
        MALI_DEBUG_ASSERT_POINTER(sync_pt);
 
        mpt = to_mali_sync_pt(sync_pt);
-       MALI_DEBUG_ASSERT_POINTER(mpt->flag);
 
-       seq_printf(s, "%u", mpt->flag->point);
+       /* It is possible this sync point is still under construction;
+        * make sure the flag is valid before accessing it.
+        */
+       if (mpt->flag) {
+               seq_printf(s, "%u", mpt->flag->point);
+       } else {
+               seq_printf(s, "uninitialized");
+       }
 }
 
+static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl)
+{
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+       mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       if (NULL != mali_tl) {
+               seq_printf(s, "oldest (%u) ", mali_tl->point_oldest);
+               seq_printf(s, "next (%u)", mali_tl->point_next);
+               seq_printf(s, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+               {
+                       u32 tid = _mali_osk_get_tid();
+                       struct mali_timeline_system *system = mali_tl->system;
+
+                       mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+                       if (!mali_tl->destroyed) {
+                               mali_spinlock_reentrant_wait(system->spinlock, tid);
+                               mali_timeline_debug_print_timeline(mali_tl, s);
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       }
+                       mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+                       /* dump job queue status and group running status */
+                       mali_executor_status_dump();
+               }
+#endif
+       }
+}
+#else
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+       struct mali_sync_pt *mpt;
+
+       MALI_DEBUG_ASSERT_POINTER(str);
+       MALI_DEBUG_ASSERT_POINTER(pt);
+
+       mpt = to_mali_sync_pt(pt);
+
+       /* It is possible this sync point is still under construction;
+        * make sure the flag is valid before accessing it.
+        */
+       if (mpt->flag) {
+               _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+       } else {
+               _mali_osk_snprintf(str, size, "uninitialized");
+       }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+       struct mali_sync_timeline_container *mali_sync_tl = NULL;
+       struct mali_timeline *mali_tl = NULL;
+
+       MALI_DEBUG_ASSERT_POINTER(timeline);
+
+       mali_sync_tl = to_mali_sync_tl_container(timeline);
+       MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+       mali_tl = mali_sync_tl->timeline;
+
+       if (NULL != mali_tl) {
+               _mali_osk_snprintf(str, size, "oldest (%u) ", mali_tl->point_oldest);
+               _mali_osk_snprintf(str, size, "next (%u)", mali_tl->point_next);
+               _mali_osk_snprintf(str, size, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+               {
+                       u32 tid = _mali_osk_get_tid();
+                       struct mali_timeline_system *system = mali_tl->system;
+
+                       mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+                       if (!mali_tl->destroyed) {
+                               mali_spinlock_reentrant_wait(system->spinlock, tid);
+                               mali_timeline_debug_direct_print_timeline(mali_tl);
+                               mali_spinlock_reentrant_signal(system->spinlock, tid);
+                       }
+                       mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+                       /* dump job queue status and group running status */
+                       mali_executor_status_dump();
+               }
+#endif
+       }
+}
+#endif
+
+
 static struct sync_timeline_ops mali_timeline_ops = {
        .driver_name    = "Mali",
        .dup            = timeline_dup,
@@ -127,16 +264,26 @@ static struct sync_timeline_ops mali_timeline_ops = {
        .compare        = timeline_compare,
        .free_pt        = timeline_free_pt,
        .release_obj    = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
        .print_pt       = timeline_print_pt,
+       .print_obj      = timeline_print_obj,
+#else
+       .pt_value_str = timeline_pt_value_str,
+       .timeline_value_str = timeline_value_str,
+#endif
 };
 
-struct sync_timeline *mali_sync_timeline_create(const char *name)
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
 {
        struct sync_timeline *sync_tl;
+       struct mali_sync_timeline_container *mali_sync_tl;
 
-       sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct sync_timeline), name);
+       sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
        if (NULL == sync_tl) return NULL;
 
+       mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+       mali_sync_tl->timeline = timeline;
+
        /* Grab a reference on the module to ensure the callbacks are present
         * as long some timeline exists. The reference is released when the
         * timeline is freed.
@@ -147,12 +294,6 @@ struct sync_timeline *mali_sync_timeline_create(const char *name)
        return sync_tl;
 }
 
-mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl)
-{
-       MALI_DEBUG_ASSERT_POINTER(sync_tl);
-       return (sync_tl->ops == &mali_timeline_ops) ? MALI_TRUE : MALI_FALSE;
-}
-
 s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
 {
        s32 fd = -1;
@@ -160,7 +301,6 @@ s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
        fd = get_unused_fd();
        if (fd < 0) {
                sync_fence_put(sync_fence);
-               MALI_DEBUG_PRINT(1, ("get_unused_fd() got fd < 0, fd=%x\n", fd));
                return -1;
        }
        sync_fence_install(sync_fence, fd);
@@ -280,6 +420,7 @@ static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
 
        mpt = to_mali_sync_pt(pt);
        mpt->flag = flag;
+       mpt->sync_tl = flag->sync_tl;
 
        return pt;
 }
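
With the new signature, a caller passes the owning mali timeline when creating
the wrapper; a hypothetical call site (mali_tl is an existing
struct mali_timeline *, and the name string is illustrative):

    struct sync_timeline *sync_tl;

    sync_tl = mali_sync_timeline_create(mali_tl, "mali-timeline");
    if (NULL == sync_tl) {
            /* handle allocation failure */
    }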
index 45ae32b70529cc3aace890e4d5f6393bc4fe4f24..3236af30483bb65b99084e0a6e062ec9a44eeb5d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 #if defined(CONFIG_SYNC)
 
-#include <linux/version.h>
-
 #include <linux/seq_file.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
-#include <sync.h>
-#else
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
 #include <linux/sync.h>
+#else
+#include <sync.h>
 #endif
 
+
 #include "mali_osk.h"
 
 struct mali_sync_flag;
+struct mali_timeline;
 
 /**
  * Create a sync timeline.
@@ -38,15 +39,7 @@ struct mali_sync_flag;
  * @param name Name of the sync timeline.
  * @return The new sync timeline if successful, NULL if not.
  */
-struct sync_timeline *mali_sync_timeline_create(const char *name);
-
-/**
- * Check if sync timeline belongs to Mali.
- *
- * @param sync_tl Sync timeline to check.
- * @return MALI_TRUE if sync timeline belongs to Mali, MALI_FALSE if not.
- */
-mali_bool mali_sync_timeline_is_ours(struct sync_timeline *sync_tl);
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
 
 /**
  * Creates a file descriptor representing the sync fence.  Will release sync fence if allocation of
index fac43ae4a848a1261187e95c596ad0d0cdd40400..06bc93d95736adb77e7566876f0dbe4899035b8b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012 ARM Limited
+ * (C) COPYRIGHT 2012, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 281089b4e1b5684a653cbaf747c88b678d17024e..730549308b84200679f46550bd06f4bf7d37f05f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -26,7 +26,7 @@ int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get
 
        if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_api_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -36,6 +36,25 @@ int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get
        return 0;
 }
 
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+       _mali_uk_get_api_version_v2_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+       if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_get_api_version_v2(&kargs);
+       if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+       if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+       if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+       return 0;
+}
+
 int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
 {
        _mali_uk_wait_for_notification_s kargs;
@@ -43,12 +62,12 @@ int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_wait_for_notification(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
-       if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
-               kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+               kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
                if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
        } else {
                if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
@@ -64,7 +83,7 @@ int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_p
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        if (0 != get_user(kargs.type, &uargs->type)) {
                return -EFAULT;
@@ -85,13 +104,13 @@ int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_g
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_user_settings(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       kargs.ctx = 0; /* prevent a kernel address from being returned to user space */
        if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
 
        return 0;
@@ -104,10 +123,10 @@ int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_request_high_priority(&kargs);
 
-       kargs.ctx = NULL;
+       kargs.ctx = 0;
 
        return map_errcode(err);
 }
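
A hedged sketch of the wrapper pattern this commit applies throughout: copy the
arguments in, stash the session as a uintptr_t context, call the kernel-side
implementation, scrub the context, and copy the results out.
_mali_uk_example_s and _mali_ukk_example are hypothetical names:

    int example_wrapper(struct mali_session_data *session_data,
                        _mali_uk_example_s __user *uargs)
    {
            _mali_uk_example_s kargs;
            _mali_osk_errcode_t err;

            MALI_CHECK_NON_NULL(uargs, -EINVAL);
            if (0 != copy_from_user(&kargs, uargs, sizeof(kargs)))
                    return -EFAULT;

            kargs.ctx = (uintptr_t)session_data;
            err = _mali_ukk_example(&kargs);
            if (_MALI_OSK_ERR_OK != err)
                    return map_errcode(err);

            kargs.ctx = 0; /* never leak a kernel address back to user space */
            if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
                    return -EFAULT;
            return 0;
    }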
index d3992d1f8fa997d58e1c8860e8a811b4b8809ecc..5b989134073a714d6fcc05d3cfa684b9d24a7bc7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -40,7 +40,7 @@ int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err =  _mali_ukk_get_gp_core_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -61,7 +61,7 @@ int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk
 
        if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_gp_suspend_response(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -79,7 +79,7 @@ int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_gp_number_of_cores(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
index 6baf151a615a0e08047498ccb721202be306e4a4..8e3f117c432218053cc10c4baa22a0bd41533832 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_session.h"
 #include "mali_ukk_wrappers.h"
 
-int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs)
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs)
 {
-       _mali_uk_mem_write_safe_s kargs;
+       _mali_uk_alloc_mem_s kargs;
        _mali_osk_errcode_t err;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_alloc_mem_s))) {
                return -EFAULT;
        }
+       kargs.ctx = (uintptr_t)session_data;
 
-       kargs.ctx = session_data;
+       err = _mali_ukk_mem_allocate(&kargs);
 
-       /* Check if we can access the buffers */
-       if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
-           || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
-               return -EINVAL;
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
        }
 
-       /* Check if size wraps */
-       if ((kargs.size + kargs.dest) <= kargs.dest
-           || (kargs.size + kargs.src) <= kargs.src) {
-               return -EINVAL;
+       if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+               return -EFAULT;
        }
 
-       err = _mali_ukk_mem_write_safe(&kargs);
+       return 0;
+}
+
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs)
+{
+       _mali_uk_free_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_free_mem_s))) {
+               return -EFAULT;
+       }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_free(&kargs);
+
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       if (0 != put_user(kargs.size, &uargs->size)) {
+       if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) {
                return -EFAULT;
        }
 
        return 0;
 }
 
-int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs)
 {
-       _mali_uk_map_external_mem_s uk_args;
-       _mali_osk_errcode_t err_code;
+       _mali_uk_bind_mem_s kargs;
+       _mali_osk_errcode_t err;
 
-       /* validate input */
-       /* the session_data pointer was validated by caller */
-       MALI_CHECK_NON_NULL( argument, -EINVAL);
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) ) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_bind_mem_s))) {
                return -EFAULT;
        }
+       kargs.ctx = (uintptr_t)session_data;
 
-       uk_args.ctx = session_data;
-       err_code = _mali_ukk_map_external_mem( &uk_args );
+       err = _mali_ukk_mem_bind(&kargs);
 
-       if (0 != put_user(uk_args.cookie, &argument->cookie)) {
-               if (_MALI_OSK_ERR_OK == err_code) {
-                       /* Rollback */
-                       _mali_uk_unmap_external_mem_s uk_args_unmap;
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       return 0;
+}
 
-                       uk_args_unmap.ctx = session_data;
-                       uk_args_unmap.cookie = uk_args.cookie;
-                       err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
-                       if (_MALI_OSK_ERR_OK != err_code) {
-                               MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
-                       }
-               }
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs)
+{
+       _mali_uk_unbind_mem_s kargs;
+       _mali_osk_errcode_t err;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_unbind_mem_s))) {
                return -EFAULT;
        }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_unbind(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
 
-       /* Return the error that _mali_ukk_free_big_block produced */
-       return map_errcode(err_code);
+       return 0;
 }
 
-int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs)
 {
-       _mali_uk_unmap_external_mem_s uk_args;
-       _mali_osk_errcode_t err_code;
+       _mali_uk_cow_mem_s kargs;
+       _mali_osk_errcode_t err;
 
-       /* validate input */
-       /* the session_data pointer was validated by caller */
-       MALI_CHECK_NON_NULL( argument, -EINVAL);
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) ) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) {
                return -EFAULT;
        }
+       kargs.ctx = (uintptr_t)session_data;
+
+       err = _mali_ukk_mem_cow(&kargs);
+
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
 
-       uk_args.ctx = session_data;
-       err_code = _mali_ukk_unmap_external_mem( &uk_args );
+       if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+               return -EFAULT;
+       }
 
-       /* Return the error that _mali_ukk_free_big_block produced */
-       return map_errcode(err_code);
+       return 0;
 }
 
-#if defined(CONFIG_MALI400_UMP)
-int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs)
 {
-       _mali_uk_release_ump_mem_s uk_args;
-       _mali_osk_errcode_t err_code;
+       _mali_uk_cow_modify_range_s kargs;
+       _mali_osk_errcode_t err;
 
-       /* validate input */
-       /* the session_data pointer was validated by caller */
-       MALI_CHECK_NON_NULL( argument, -EINVAL);
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) ) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) {
                return -EFAULT;
        }
+       kargs.ctx = (uintptr_t)session_data;
 
-       uk_args.ctx = session_data;
-       err_code = _mali_ukk_release_ump_mem( &uk_args );
+       err = _mali_ukk_mem_cow_modify_range(&kargs);
 
-       /* Return the error that _mali_ukk_free_big_block produced */
-       return map_errcode(err_code);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) {
+               return -EFAULT;
+       }
+       return 0;
 }
 
-int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs)
 {
-       _mali_uk_attach_ump_mem_s uk_args;
-       _mali_osk_errcode_t err_code;
+       _mali_uk_mem_write_safe_s kargs;
+       _mali_osk_errcode_t err;
 
-       /* validate input */
-       /* the session_data pointer was validated by caller */
-       MALI_CHECK_NON_NULL( argument, -EINVAL);
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
-       if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) ) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
                return -EFAULT;
        }
 
-       uk_args.ctx = session_data;
-       err_code = _mali_ukk_attach_ump_mem( &uk_args );
+       kargs.ctx = (uintptr_t)session_data;
 
-       if (0 != put_user(uk_args.cookie, &argument->cookie)) {
-               if (_MALI_OSK_ERR_OK == err_code) {
-                       /* Rollback */
-                       _mali_uk_release_ump_mem_s uk_args_unmap;
+       /* Check if we can access the buffers */
+       if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+           || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+               return -EINVAL;
+       }
 
-                       uk_args_unmap.ctx = session_data;
-                       uk_args_unmap.cookie = uk_args.cookie;
-                       err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
-                       if (_MALI_OSK_ERR_OK != err_code) {
-                               MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
-                       }
-               }
+       /* Check if size wraps */
+       if ((kargs.size + kargs.dest) <= kargs.dest
+           || (kargs.size + kargs.src) <= kargs.src) {
+               return -EINVAL;
+       }
+
+       err = _mali_ukk_mem_write_safe(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       if (0 != put_user(kargs.size, &uargs->size)) {
                return -EFAULT;
        }
 
-       /* Return the error that _mali_ukk_map_external_ump_mem produced */
-       return map_errcode(err_code);
+       return 0;
 }
-#endif /* CONFIG_MALI400_UMP */
 
-int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs)
 {
        _mali_uk_query_mmu_page_table_dump_size_s kargs;
        _mali_osk_errcode_t err;
@@ -178,7 +212,7 @@ int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
@@ -188,35 +222,41 @@ int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session
        return 0;
 }
 
-int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs)
 {
        _mali_uk_dump_mmu_page_table_s kargs;
        _mali_osk_errcode_t err;
-       void *buffer;
+       void __user *user_buffer;
+       void *buffer = NULL;
        int rc = -EFAULT;
 
        /* validate input */
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        /* the session_data pointer was validated by caller */
 
-       kargs.buffer = NULL;
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+               goto err_exit;
 
-       /* get location of user buffer */
-       if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
-       /* get size of mmu page table info buffer from user space */
-       if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
-       /* verify we can access the whole of the user buffer */
-       if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+       user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+       if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+               goto err_exit;
 
        /* allocate temporary buffer (kernel side) to store mmu page table info */
-       MALI_CHECK(kargs.size > 0, -ENOMEM);
-       kargs.buffer = _mali_osk_valloc(kargs.size);
-       if (NULL == kargs.buffer) {
+       if (kargs.size <= 0)
+               return -EINVAL;
+       /* Allow at most 8MiB buffers; this is more than enough to dump a
+        * fully populated page table. */
+       if (kargs.size > SZ_8M)
+               return -EINVAL;
+
+       buffer = (void *)(uintptr_t)_mali_osk_valloc(kargs.size);
+       if (NULL == buffer) {
                rc = -ENOMEM;
                goto err_exit;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.buffer = (uintptr_t)buffer;
        err = _mali_ukk_dump_mmu_page_table(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                rc = map_errcode(err);
@@ -224,14 +264,47 @@ int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mal
        }
 
        /* copy mmu page table info back to user space and update pointers */
-       if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size) ) goto err_exit;
-       if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
-       if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
-       if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
-       if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+       if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+               goto err_exit;
+
+       kargs.register_writes = kargs.register_writes -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+       kargs.page_table_dump = kargs.page_table_dump -
+                               (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+       if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+               goto err_exit;
+
        rc = 0;
 
 err_exit:
-       if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+       if (buffer) _mali_osk_vfree(buffer);
        return rc;
 }
+
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
+{
+       _mali_osk_errcode_t err;
+       _mali_uk_profiling_memory_usage_get_s kargs;
+
+       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_mem_usage_get(&kargs);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
+       }
+
+       kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
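
A sketch of the pointer-rebasing arithmetic mem_dump_mmu_page_table_wrapper
now performs above: an address inside the kernel bounce buffer is translated
to the matching address in the user buffer before being returned. All names
here are illustrative:

    static uintptr_t example_rebase(uintptr_t p, void *kbuf, void __user *ubuf)
    {
            /* same offset into the user buffer as p had into the kernel buffer */
            return p - (uintptr_t)kbuf + (uintptr_t)ubuf;
    }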
index 1519230b8be085bd6ee47aac3fe217c757ae23c1..d9ad52590c24690a0e55ab8887e4a3c0f8ce6fa0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -57,14 +57,14 @@ int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
 
        err = _mali_ukk_get_pp_number_of_cores(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+       kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
        if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
                return -EFAULT;
        }
@@ -80,7 +80,7 @@ int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
        MALI_CHECK_NON_NULL(session_data, -EINVAL);
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_get_pp_core_version(&kargs);
        if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
 
@@ -98,7 +98,7 @@ int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_di
 
        if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        _mali_ukk_pp_job_disable_wb(&kargs);
 
        return 0;
index d77ca2517dd2b630fee757b2155f8e4ec19de4c3..6f96940a82744170e46e013687f7127aafcdb305 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2013 ARM Limited
+ * (C) COPYRIGHT 2010-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "mali_session.h"
 #include "mali_ukk_wrappers.h"
 
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
 {
-       _mali_uk_profiling_start_s kargs;
+       _mali_uk_profiling_add_event_s kargs;
        _mali_osk_errcode_t err;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s))) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_start(&kargs);
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_profiling_add_event(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       if (0 != put_user(kargs.limit, &uargs->limit)) {
-               return -EFAULT;
-       }
-
        return 0;
 }
 
-int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
 {
-       _mali_uk_profiling_add_event_s kargs;
+       _mali_uk_sw_counters_report_s kargs;
        _mali_osk_errcode_t err;
+       u32 *counter_buffer;
+       u32 __user *counters;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_add_event(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
+       /* make sure that kargs.num_counters is [at least somewhat] sane */
+       if (kargs.num_counters > 10000) {
+               MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+               return -EINVAL;
        }
 
-       return 0;
-}
+       counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+       if (NULL == counter_buffer) {
+               return -ENOMEM;
+       }
 
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
-{
-       _mali_uk_profiling_stop_s kargs;
-       _mali_osk_errcode_t err;
+       counters = (u32 *)(uintptr_t)kargs.counters;
 
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+       if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
+               kfree(counter_buffer);
+               return -EFAULT;
+       }
+
+       kargs.ctx = (uintptr_t)session_data;
+       kargs.counters = (uintptr_t)counter_buffer;
+
+       err = _mali_ukk_sw_counters_report(&kargs);
+
+       kfree(counter_buffer);
 
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_stop(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       if (0 != put_user(kargs.count, &uargs->count)) {
-               return -EFAULT;
-       }
-
        return 0;
 }
 
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs)
 {
-       _mali_uk_profiling_get_event_s kargs;
+       _mali_uk_profiling_stream_fd_get_s kargs;
        _mali_osk_errcode_t err;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       if (0 != get_user(kargs.index, &uargs->index)) {
+       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
-
-       err = _mali_ukk_profiling_get_event(&kargs);
+       kargs.ctx = (uintptr_t)session_data;
+       err = _mali_ukk_profiling_stream_fd_get(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
        }
 
-       kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
-       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s))) {
+       if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
                return -EFAULT;
        }
 
        return 0;
 }
 
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs)
 {
-       _mali_uk_profiling_clear_s kargs;
+       _mali_uk_profiling_control_set_s kargs;
        _mali_osk_errcode_t err;
+       u8 *kernel_control_data = NULL;
+       u8 *kernel_response_data = NULL;
 
        MALI_CHECK_NON_NULL(uargs, -EINVAL);
 
-       kargs.ctx = session_data;
-       err = _mali_ukk_profiling_clear(&kargs);
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
+       if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT;
+       if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT;
 
-       return 0;
-}
+       kargs.ctx = (uintptr_t)session_data;
 
-int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
-{
-       _mali_uk_sw_counters_report_s kargs;
-       _mali_osk_errcode_t err;
-       u32 *counter_buffer;
+       if (0 != kargs.control_packet_size) {
 
-       MALI_CHECK_NON_NULL(uargs, -EINVAL);
+               kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
+               if (NULL == kernel_control_data) {
+                       return -ENOMEM;
+               }
 
-       if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
-               return -EFAULT;
-       }
+               MALI_DEBUG_ASSERT(0 != kargs.response_packet_size);
 
-       /* make sure that kargs.num_counters is [at least somewhat] sane */
-       if (kargs.num_counters > 10000) {
-               MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
-               return -EINVAL;
-       }
+               kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
+               if (NULL == kernel_response_data) {
+                       _mali_osk_free(kernel_control_data);
+                       return -ENOMEM;
+               }
 
-       counter_buffer = (u32*)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
-       if (NULL == counter_buffer) {
-               return -ENOMEM;
-       }
+               kargs.control_packet_data = (uintptr_t)kernel_control_data;
+               kargs.response_packet_data = (uintptr_t)kernel_response_data;
 
-       if (0 != copy_from_user(counter_buffer, kargs.counters, sizeof(u32) * kargs.num_counters)) {
-               kfree(counter_buffer);
-               return -EFAULT;
-       }
+               if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
 
-       kargs.ctx = session_data;
-       kargs.counters = counter_buffer;
+               err = _mali_ukk_profiling_control_set(&kargs);
+               if (_MALI_OSK_ERR_OK != err) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return map_errcode(err);
+               }
 
-       err = _mali_ukk_sw_counters_report(&kargs);
+               if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
 
-       kfree(counter_buffer);
+               if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) {
+                       _mali_osk_free(kernel_control_data);
+                       _mali_osk_free(kernel_response_data);
+                       return -EFAULT;
+               }
 
-       if (_MALI_OSK_ERR_OK != err) {
-               return map_errcode(err);
-       }
+               _mali_osk_free(kernel_control_data);
+               _mali_osk_free(kernel_response_data);
+       } else {
 
+               err = _mali_ukk_profiling_control_set(&kargs);
+               if (_MALI_OSK_ERR_OK != err) {
+                       return map_errcode(err);
+               }
+
+       }
        return 0;
 }
-
-
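
The rewritten profiling_report_sw_counters_wrapper above follows a validate-allocate-copy shape. A minimal sketch of that shape with hypothetical names (only the 10000-entry cap and the u64-carried user pointer mirror the commit): the hard cap bounds the kmalloc size, so userspace can neither force an oversized kernel allocation nor overflow the size multiplication.

#include <linux/slab.h>
#include <linux/uaccess.h>

#define EXAMPLE_MAX_COUNTERS 10000

static int example_copy_counters(u64 user_ptr, u32 num, u32 **out)
{
	u32 *buf;

	if (num > EXAMPLE_MAX_COUNTERS)
		return -EINVAL;

	buf = kmalloc(sizeof(u32) * num, GFP_KERNEL);
	if (NULL == buf)
		return -ENOMEM;

	if (0 != copy_from_user(buf, (u32 __user *)(uintptr_t)user_ptr,
				sizeof(u32) * num)) {
		kfree(buf);
		return -EFAULT;
	}

	*out = buf; /* caller kfree()s after use */
	return 0;
}
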
index 4dd005dca253067578bb03194028b10bb0a074c9..375cd1d98a67aaec0d272d05ca3a94510e8136b8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -21,8 +21,9 @@
 
 int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
 {
-       u32 type, user_job, point;
-       _mali_uk_fence_t uk_fence;
+       _mali_uk_soft_job_start_s kargs;
+       u32 type, point;
+       u64 user_job;
        struct mali_timeline_fence fence;
        struct mali_soft_job *job = NULL;
        u32 __user *job_id_ptr = NULL;
@@ -35,14 +36,17 @@ int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_
 
        MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
 
-       if (0 != get_user(type, &uargs->type))                 return -EFAULT;
-       if (0 != get_user(user_job, &uargs->user_job))         return -EFAULT;
-       if (0 != get_user(job_id_ptr, &uargs->job_id_ptr))     return -EFAULT;
+       if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+               return -EFAULT;
+       }
+
+       type = kargs.type;
+       user_job = kargs.user_job;
+       job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
 
-       if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
-       mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+       mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
 
-       if (MALI_SOFT_JOB_TYPE_USER_SIGNALED < type) {
+       if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
                MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
                return -EINVAL;
        }
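
The type check above moves from an ordering comparison (`MALI_SOFT_JOB_TYPE_USER_SIGNALED < type`) to an explicit whitelist of the two supported soft-job types. A sketch of the idea with hypothetical enum values; unlike a range check, per-value comparison stays correct if the enum gains members or its ordering changes.

#include <linux/types.h>

enum example_soft_job_type {
	EXAMPLE_SOFT_JOB_TYPE_USER_SIGNALED,
	EXAMPLE_SOFT_JOB_TYPE_SELF_SIGNALED,
};

static bool example_soft_job_type_is_valid(u32 type)
{
	return (type == EXAMPLE_SOFT_JOB_TYPE_USER_SIGNALED) ||
	       (type == EXAMPLE_SOFT_JOB_TYPE_SELF_SIGNALED);
}
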
index 40ef766c0fb5a6981ed51c8b2554d3feed79d09f..ac36c34502a2bb2688ec42c92c4767a46d9e8b07 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -59,7 +59,7 @@ int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_w
 
        ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
        status = (MALI_TRUE == ret ? 1 : 0);
-   
+
        if (0 != put_user(status, &uargs->status)) return -EFAULT;
 
        return 0;
@@ -78,10 +78,6 @@ int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_
 
 #if defined(CONFIG_SYNC)
        sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
-       if (sync_fd < 0)
-       {
-          MALI_DEBUG_PRINT(1, ("mali_timeline_sync_fence_create() fail!, return sync_fd=%x\n", sync_fd));   
-       }
 #else
        sync_fd = -1;
 #endif /* defined(CONFIG_SYNC) */
index 18fdef68d780cc06b96aea45ff807560cf6eb4d4..e44a1dc29db6d835f59b10658866a49a6aa9cb57 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2013 ARM Limited
+ * (C) COPYRIGHT 2011-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -28,7 +28,7 @@ int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_
                return -EFAULT;
        }
 
-       kargs.ctx = session_data;
+       kargs.ctx = (uintptr_t)session_data;
        err = _mali_ukk_vsync_event_report(&kargs);
        if (_MALI_OSK_ERR_OK != err) {
                return map_errcode(err);
index c28e554a56fb16d86b69eecfbec6b43b186c51df..e963943d9c6e411369ce5c47d197835d3320cd17 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -25,27 +25,27 @@ extern "C" {
 
 int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
 int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
 int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
 int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
 int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
 
-int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user * uargs);
-int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
-int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
-int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
-int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs);
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs);
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs);
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs);
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs);
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs);
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
 
 int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
 int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
 int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
 int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
 int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
-
-#if defined(CONFIG_MALI400_UMP)
-int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
-int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
-#endif
-
 int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
 int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
 int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
@@ -56,17 +56,15 @@ int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali
 int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
 int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
 
-int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
 int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
-int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
-int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
-int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
 int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs);
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs);
 
 int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
 
 
-int map_errcode( _mali_osk_errcode_t err );
+int map_errcode(_mali_osk_errcode_t err);
 
 #ifdef __cplusplus
 }
index 6275ea16288fd72bfb6855b759332ebbd348fb93..af496e06c8df93ae469fd48e02e2e727513f87db 100644 (file)
@@ -1,3 +1,4 @@
+
 #ifndef __MTK_DEBUG_H__
 #define __MTK_DEBUG_H__
 
index 8eee1692d0eb8aeb7dd5c14403c36c6b5bdb7c82..503abebcaaef30f51c98a7d563cbf7a7faaea7c1 100644 (file)
@@ -1,3 +1,4 @@
+
 #include "mtk_mem_record.h"
 #include <linux/uaccess.h>
 #include <linux/mutex.h>
@@ -5,6 +6,8 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
 
 #define NAME_MEM_USAGE  "mem_usage"
 #define NAME_MEM_USAGES "mem_usages"
@@ -43,8 +46,12 @@ static int proc_mem_usage_show(struct seq_file *m, void *v)
 //-----------------------------------------------------------------------------
 static int proc_mem_usage_open(struct inode *inode, struct file *file)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+    return single_open(file, proc_mem_usage_show, PDE_DATA(inode));
+#else
        struct proc_dir_entry* pentry = PDE(inode);
     return single_open(file, proc_mem_usage_show, pentry ? pentry->data : NULL);
+#endif
 }
 //-----------------------------------------------------------------------------
 static const struct file_operations proc_mem_usage_operations = {
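
The proc_mem_usage_open change above handles the v3.10 procfs rework, where struct proc_dir_entry became private to fs/proc and PDE_DATA(inode) became the supported accessor for the data pointer passed at creation. A self-contained sketch of the same version split, names hypothetical:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/version.h>

static int example_show(struct seq_file *m, void *v)
{
	/* m->private is the data pointer handed to single_open() */
	seq_printf(m, "%p\n", m->private);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	return single_open(file, example_show, PDE_DATA(inode));
#else
	struct proc_dir_entry *pentry = PDE(inode);

	return single_open(file, example_show, pentry ? pentry->data : NULL);
#endif
}
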
@@ -205,21 +212,21 @@ int MTKMemRecordInit(void)
     g_gpu_pentry = proc_mkdir("gpu", NULL);
     if (!g_gpu_pentry)
     {
-        printk(KERN_WARNING "unable to create /proc/gpu entry\n");
+        pr_warn("unable to create /proc/gpu entry\n");
         return -ENOMEM;
     }
 
     g_mem_usage_pentry = proc_mkdir(NAME_MEM_USAGE, g_gpu_pentry);
     if (!g_gpu_pentry)
     {
-        printk(KERN_WARNING "unable to create /proc/gpu/%s entry\n", NAME_MEM_USAGE);
+        pr_warn("unable to create /proc/gpu/%s entry\n", NAME_MEM_USAGE);
         return -ENOMEM;
     }
  
     pentry = proc_create(NAME_MEM_USAGES, 0, g_gpu_pentry, &proc_mem_usages_operations);
     if (pentry == NULL)
     {
-        printk(KERN_WARNING "unable to create /proc/gpu/%s entry\n", NAME_MEM_USAGES);
+        pr_warn("unable to create /proc/gpu/%s entry\n", NAME_MEM_USAGES);
         return -ENOMEM;
     }
 
index 13f26355f9a07acda723f67bb0ee399785397f44..a539cf0f88bde398d6cd4d668659f6d74889f197 100644 (file)
@@ -1,3 +1,4 @@
+
 #ifndef __MTK_MEM_RECORD_H__
 #define __MTK_MEM_RECORD_H__
 
index 15240166fc2c688e3afb8c52ace5f5c00f88473b..8e1776c501fb635a0fc8cd1d749a5ca8e8cb3f01 100644 (file)
@@ -1,3 +1,4 @@
+
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
@@ -27,7 +28,7 @@ static void MTKPP_UnLock(MTK_PROC_PRINT_DATA *data)
 
 static void MTKPP_PrintQueueBuffer(MTK_PROC_PRINT_DATA *data, const char *fmt, ...) MTK_PP_FORMAT_PRINTF(2,3);
 
-static void MTKPP_PrintQueueBuffer2(MTK_PROC_PRINT_DATA *data, const char *fmt, ...) MTK_PP_FORMAT_PRINTF(2,3);
+//static void MTKPP_PrintQueueBuffer2(MTK_PROC_PRINT_DATA *data, const char *fmt, ...) MTK_PP_FORMAT_PRINTF(2,3);
 
 static void MTKPP_PrintRingBuffer(MTK_PROC_PRINT_DATA *data, const char *fmt, ...) MTK_PP_FORMAT_PRINTF(2,3);
 
@@ -72,6 +73,7 @@ static void MTKPP_PrintQueueBuffer(MTK_PROC_PRINT_DATA *data, const char *fmt, .
        MTKPP_UnLock(data);
 }
 
+#if 0
 static void MTKPP_PrintQueueBuffer2(MTK_PROC_PRINT_DATA *data, const char *fmt, ...)
 {
        va_list args;
@@ -105,6 +107,7 @@ static void MTKPP_PrintQueueBuffer2(MTK_PROC_PRINT_DATA *data, const char *fmt,
        
        MTKPP_UnLock(data);
 }
+#endif
 
 static void MTKPP_PrintRingBuffer(MTK_PROC_PRINT_DATA *data, const char *fmt, ...)
 {
@@ -404,8 +407,7 @@ void MTKPP_Init(void)
                }
        }
        
-       g_MTKPP_proc = create_proc_entry("gpulog", 0, NULL);
-       g_MTKPP_proc->proc_fops = &g_MTKPP_proc_ops;
+       g_MTKPP_proc = proc_create("gpulog", 0, NULL, &g_MTKPP_proc_ops);
 
        return;
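
The hunk above migrates from create_proc_entry(), removed in v3.10, to proc_create(), which registers the file_operations at creation time, so the entry is never visible without its handlers. A sketch with hypothetical names (the "gpulog" path is the one used above):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* assumed defined as in the earlier sketch */
static int example_proc_open(struct inode *inode, struct file *file);

static const struct file_operations example_proc_ops = {
	.open    = example_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static struct proc_dir_entry *example_proc;

static void example_proc_init(void)
{
	/* old, racy form: entry = create_proc_entry("gpulog", 0, NULL);
	 *                 entry->proc_fops = &example_proc_ops; */
	example_proc = proc_create("gpulog", 0, NULL, &example_proc_ops);
}
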
        
index 3c347f9c188dfa0728e8e53277bb17b238f26dab..1b3394235dfe2bf19c47e44180f0ad3ae71e60d7 100644 (file)
@@ -1,3 +1,4 @@
+
 #ifndef __MTK_PP_H__
 #define __MTK_PP_H__
 
index 314363af350cd30792880e6db6b591bcda2819c8..c31a448b23aef93289997a5978f5726262c5acd4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include <linux/moduleparam.h>
 
 #include "arm_core_scaling.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
 
-static void mali_platform_device_release(struct device *device);
+
+static int mali_core_scaling_enable = 0;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
 static u32 mali_read_phys(u32 phys_addr);
 #if defined(CONFIG_ARCH_REALVIEW)
 static void mali_write_phys(u32 phys_addr, u32 value);
 #endif
 
-static int mali_core_scaling_enable = 1;
-
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+#ifndef CONFIG_MALI_DT
+static void mali_platform_device_release(struct device *device);
 
 #if defined(CONFIG_ARCH_VEXPRESS)
 
+#if defined(CONFIG_ARM64)
+/* Juno + Mali-450 MP6 in V7 FPGA */
+static struct resource mali_gpu_resources_m450_mp6[] = {
+       MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+       MALI_GPU_RESOURCES_MALI470_MP4_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+/* [ScottTODO]*/
+#else
 static struct resource mali_gpu_resources_m450_mp8[] = {
        MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
 };
 
+static struct resource mali_gpu_resources_m450_mp6[] = {
+       MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m450_mp4[] = {
+       MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+       MALI_GPU_RESOURCES_MALI470_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+/* [ScottTODO]*/
+#endif /* CONFIG_ARM64 */
+
 #elif defined(CONFIG_ARCH_REALVIEW)
 
 static struct resource mali_gpu_resources_m300[] = {
@@ -59,28 +87,42 @@ static struct resource mali_gpu_resources_m400_mp2[] = {
        MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
 };
 
+#endif
 #endif
 
 static struct mali_gpu_device_data mali_gpu_data = {
+#ifndef CONFIG_MALI_DT
+       .pmu_switch_delay = 0xFF, /* does not have to be this high on FPGA, but a delay is useful for testing */
+       .max_job_runtime = 60000, /* 60 seconds */
 #if defined(CONFIG_ARCH_VEXPRESS)
-       .shared_mem_size =256 * 1024 * 1024, /* 256MB */
-#elif defined(CONFIG_ARCH_REALVIEW)
+       .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+#endif
+#endif
+
+#if defined(CONFIG_ARCH_REALVIEW)
        .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
        .dedicated_mem_size = 0x10000000, /* 256MB */
 #endif
+#if defined(CONFIG_ARM64)
+       .fb_start = 0x5f000000,
+       .fb_size = 0x91000000,
+#else
        .fb_start = 0xe0000000,
        .fb_size = 0x01000000,
-       .max_job_runtime = 60000, /* 60 seconds */
-       .utilization_interval = 1000, /* 1000ms */
+#endif
+       .control_interval = 1000, /* 1000ms */
        .utilization_callback = mali_gpu_utilization_callback,
-       .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
-       .pmu_domain_config = {0x1, 0x2, 0x4, 0x4, 0x4, 0x8, 0x8, 0x8, 0x8, 0x1, 0x2, 0x8},
+       .get_clock_info = NULL,
+       .get_freq = NULL,
+       .set_freq = NULL,
 };
 
+#ifndef CONFIG_MALI_DT
 static struct platform_device mali_gpu_device = {
        .name = MALI_GPU_NAME_UTGARD,
        .id = 0,
        .dev.release = mali_platform_device_release,
+       .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask,
        .dev.coherent_dma_mask = DMA_BIT_MASK(32),
 
        .dev.platform_data = &mali_gpu_data,
@@ -99,12 +141,42 @@ int mali_platform_device_register(void)
        /* Detect present Mali GPU and connect the correct resources to the device */
 #if defined(CONFIG_ARCH_VEXPRESS)
 
-       if (mali_read_phys(0xFC020000) == 0x00010100) {
+#if defined(CONFIG_ARM64)
+       mali_gpu_device.dev.archdata.dma_ops = dma_ops;
+       if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+       } else if (mali_read_phys(0x6F000000) == 0x40400430) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+               num_pp_cores = 4;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+               mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
+       }
+#else
+       if (mali_read_phys(0xFC000000) == 0x00000450) {
                MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
                num_pp_cores = 8;
                mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
                mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+       } else if (mali_read_phys(0xFC000000) == 0x40600450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+       } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+               num_pp_cores = 4;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp4;
+       } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) { /* [ScottTODO] */
+               MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+               num_pp_cores = 4;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+               mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
        }
+#endif /* CONFIG_ARM64 */
 
 #elif defined(CONFIG_ARCH_REALVIEW)
 
@@ -139,7 +211,7 @@ int mali_platform_device_register(void)
        err = platform_device_register(&mali_gpu_device);
        if (0 == err) {
 #ifdef CONFIG_PM_RUNTIME
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
                pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
                pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
 #endif
@@ -173,6 +245,108 @@ static void mali_platform_device_release(struct device *device)
        MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
 }
 
+#else /* CONFIG_MALI_DT */
+int mali_platform_device_init(struct platform_device *device)
+{
+       int num_pp_cores = 0;
+       int err = -1;
+#if defined(CONFIG_ARCH_REALVIEW)
+       u32 m400_gp_version;
+#endif
+
+       /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+       if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+       } else if (mali_read_phys(0x6F000000) == 0x40400430) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+               num_pp_cores = 4;
+       }
+#else
+       if (mali_read_phys(0xFC000000) == 0x00000450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+               num_pp_cores = 8;
+       } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+               num_pp_cores = 4;
+       } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) { /* [ScottTODO] */
+               MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+               num_pp_cores = 4;
+       }
+#endif
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+       m400_gp_version = mali_read_phys(0xC000006C);
+       if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+               num_pp_cores = 1;
+               mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+       } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+               u32 fpga_fw_version = mali_read_phys(0xC0010000);
+               if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+                       /* Mali-400 MP1 r1p0 or r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+                       num_pp_cores = 1;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               } else if (fpga_fw_version == 0x130C000F) {
+                       /* Mali-400 MP2 r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+                       num_pp_cores = 2;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               }
+       }
+#endif
+
+       /* Since kernel 3.15 the device tree core sets the dev-related
+        * parameters by default in of_platform_device_create_pdata().
+        * Earlier kernels differ from version to version: 3.10, for
+        * example, does not set device->dev.dma_mask, and leaving it
+        * unset here causes dma_mapping errors, while 3.15 does set it.
+        * It is therefore safer for the DDK to set the required
+        * parameters itself.
+        */
+       if (!device->dev.dma_mask)
+               device->dev.dma_mask = &device->dev.coherent_dma_mask;
+       device->dev.archdata.dma_ops = dma_ops;
+
+       err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+
+       if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+               pm_runtime_set_autosuspend_delay(&(device->dev), 1000);
+               pm_runtime_use_autosuspend(&(device->dev));
+#endif
+               pm_runtime_enable(&(device->dev));
+#endif
+               MALI_DEBUG_ASSERT(0 < num_pp_cores);
+               mali_core_scaling_init(num_pp_cores);
+       }
+
+       return err;
+}
+
+int mali_platform_device_deinit(struct platform_device *device)
+{
+       MALI_IGNORE(device);
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+       mali_core_scaling_term();
+
+#if defined(CONFIG_ARCH_REALVIEW)
+       mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+
+       return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
 static u32 mali_read_phys(u32 phys_addr)
 {
        u32 phys_addr_page = phys_addr & 0xFFFFE000;
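
A sketch of the dma_mask fix-up described in the comment above; the probe function name is hypothetical. dev.dma_mask is a pointer, so when the OF core has not populated it, aiming it at coherent_dma_mask gives the DMA API valid storage holding a 32-bit mask to test against.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_platform_probe(struct platform_device *pdev)
{
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return 0;
}
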
@@ -181,7 +355,7 @@ static u32 mali_read_phys(u32 phys_addr)
        u32 ret = 0xDEADBEEF;
        void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
        if (NULL != mem_mapped) {
-               ret = (u32)ioread32(((u8*)mem_mapped) + phys_offset);
+               ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
                iounmap(mem_mapped);
        }
 
@@ -196,7 +370,7 @@ static void mali_write_phys(u32 phys_addr, u32 value)
        u32 map_size       = phys_offset + sizeof(u32);
        void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
        if (NULL != mem_mapped) {
-               iowrite32(value, ((u8*)mem_mapped) + phys_offset);
+               iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
                iounmap(mem_mapped);
        }
 }
@@ -207,7 +381,7 @@ static int param_set_core_scaling(const char *val, const struct kernel_param *kp
        int ret = param_set_int(val, kp);
 
        if (1 == mali_core_scaling_enable) {
-               mali_core_scaling_sync(mali_pp_scheduler_get_num_cores_enabled());
+               mali_core_scaling_sync(mali_executor_get_num_cores_enabled());
        }
        return ret;
 }
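
param_set_core_scaling above is the usual module_param_cb hook: a custom setter runs on every write to /sys/module/<mod>/parameters/<name> and can react to the new value immediately. A minimal self-contained sketch with a simplified body and hypothetical names:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int example_enable;

static int example_param_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp); /* updates example_enable */

	if (0 == ret && 1 == example_enable)
		pr_info("core scaling enabled, resync core count here\n");
	return ret;
}

static struct kernel_param_ops example_param_ops = {
	.set = example_param_set,
	.get = param_get_int,
};

module_param_cb(example_enable, &example_param_ops, &example_enable, 0644);
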
index e9fcf80c6456d489ed5bdf5d093a3e9515e152b1..70bbbcc957fb90595900d27082897ab4fd0e9fea 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -40,7 +40,7 @@ static void enable_one_core(void)
                MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
        }
 
-       MALI_DEBUG_ASSERT(              1 <= num_cores_enabled);
+       MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
        MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
 }
 
@@ -52,7 +52,7 @@ static void disable_one_core(void)
                MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
        }
 
-       MALI_DEBUG_ASSERT(              1 <= num_cores_enabled);
+       MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
        MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
 }
 
@@ -108,13 +108,13 @@ void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
        /* NOTE: this function is normally called directly from the utilization callback which is in
         * timer context. */
 
-       if (     PERCENT_OF(90, 256) < data->utilization_pp) {
+       if (PERCENT_OF(90, 256) < data->utilization_pp) {
                enable_max_num_cores();
        } else if (PERCENT_OF(50, 256) < data->utilization_pp) {
                enable_one_core();
        } else if (PERCENT_OF(40, 256) < data->utilization_pp) {
                /* do nothing */
-       } else if (PERCENT_OF( 0, 256) < data->utilization_pp) {
+       } else if (PERCENT_OF(0, 256) < data->utilization_pp) {
                disable_one_core();
        } else {
                /* do nothing */
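
For reference, the thresholds above operate on utilization_pp, a 0..256 fixed-point load figure. Assuming the usual integer expansion of PERCENT_OF (a sketch, not quoted from the driver's header), the bands work out as follows:

/* assumed definition for illustration */
#define PERCENT_OF(percent, max) ((percent) * (max) / 100)

/* utilization_pp > 230 (90% of 256) -> enable all cores
 * utilization_pp > 128 (50% of 256) -> enable one more core
 * utilization_pp > 102 (40% of 256) -> hold current count
 * utilization_pp >   0              -> disable one core */
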
index bab8598b8d311b8b6af819dd60f78d8c951800d4..8881c41c8b17dd67053f5d11cc3fe3703aedeef5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013 ARM Limited
+ * (C) COPYRIGHT 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index e337db062d5e94939caa1a9a8e6d99b4e1297fbd..58d5ae20c18f46e8fac70ab108a57d08ed6a6d84 100644 (file)
@@ -1,12 +1,3 @@
-/*
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
 
 /**
  * @file arm_core_scaling.c
@@ -17,7 +8,6 @@
 
 #include <linux/mali/mali_utgard.h>
 #include "mali_kernel_common.h"
-#include "mali_pp_scheduler.h"
 
 #include <linux/workqueue.h>
 
@@ -26,8 +16,6 @@ static int num_cores_enabled;
 
 static struct work_struct wq_work;
 
-int mali_core_scaling_enable = 0; /*Currenly not support on 8127*/
-
 static void set_num_cores(struct work_struct *work)
 {
        int err = mali_perf_set_num_pp_cores(num_cores_enabled);
@@ -37,34 +25,31 @@ static void set_num_cores(struct work_struct *work)
 
 static void enable_one_core(void)
 {
-       if (num_cores_enabled < num_cores_total)
-       {
+       if (num_cores_enabled < num_cores_total) {
                ++num_cores_enabled;
                schedule_work(&wq_work);
                MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
        }
 
-       MALI_DEBUG_ASSERT(              1 <= num_cores_enabled);
+       MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
        MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
 }
 
 static void disable_one_core(void)
 {
-       if (1 < num_cores_enabled)
-       {
+       if (1 < num_cores_enabled) {
                --num_cores_enabled;
                schedule_work(&wq_work);
                MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
        }
 
-       MALI_DEBUG_ASSERT(              1 <= num_cores_enabled);
+       MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
        MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
 }
 
 static void enable_max_num_cores(void)
 {
-       if (num_cores_enabled < num_cores_total)
-       {
+       if (num_cores_enabled < num_cores_total) {
                num_cores_enabled = num_cores_total;
                schedule_work(&wq_work);
                MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
@@ -88,7 +73,6 @@ void mali_core_scaling_sync(int num_cores)
        num_cores_enabled = num_cores;
 }
 
-
 void mali_core_scaling_term(void)
 {
        flush_scheduled_work();
@@ -115,44 +99,15 @@ void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
        /* NOTE: this function is normally called directly from the utilization callback which is in
         * timer context. */
 
-       if (     PERCENT_OF(90, 256) < data->utilization_pp)
-       {
+       if (PERCENT_OF(90, 256) < data->utilization_pp) {
                enable_max_num_cores();
-       }
-       else if (PERCENT_OF(50, 256) < data->utilization_pp)
-       {
+       } else if (PERCENT_OF(50, 256) < data->utilization_pp) {
                enable_one_core();
-       }
-       else if (PERCENT_OF(40, 256) < data->utilization_pp)
-       {
+       } else if (PERCENT_OF(40, 256) < data->utilization_pp) {
                /* do nothing */
-       }
-       else if (PERCENT_OF( 0, 256) < data->utilization_pp)
-       {
+       } else if (PERCENT_OF(0, 256) < data->utilization_pp) {
                disable_one_core();
-       }
-       else
-       {
+       } else {
                /* do nothing */
        }
 }
-
-
-static int param_set_core_scaling(const char *val, const struct kernel_param *kp)
-{
-       int ret = param_set_int(val, kp);
-
-       if (1 == mali_core_scaling_enable) {
-               mali_core_scaling_sync(mali_pp_scheduler_get_num_cores_enabled());
-       }
-       return ret;
-}
-
-static struct kernel_param_ops param_ops_core_scaling = {
-       .set = param_set_core_scaling,
-       .get = param_get_int,
-};
-
-module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
-MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy");
-
index 193f43cc77789043a65acaff0e75cf97aca29075..3c39cfc1d4ba6eddc442132896151dad5989129d 100644 (file)
@@ -1,12 +1,3 @@
-/*
- * Copyright (C) 2013 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
 
 /**
  * @file arm_core_scaling.h
index f50e6de700fa00ef83a4d932a254cfaf0ff86794..571b7e569de2f93928642db0842a625182d16659 100644 (file)
@@ -1,3 +1,10 @@
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for:
+ * - Realview Versatile platforms with ARM11 Mpcore and virtex 5.
+ * - Versatile Express platforms with ARM Cortex-A9 and virtex 6.
+ */
 #include <linux/platform_device.h>
 #include <linux/version.h>
 #include <linux/pm.h>
 #include <linux/mali/mali_utgard.h>
 #include "mali_kernel_common.h"
 #include <linux/dma-mapping.h>
-#include <mach/mt_irq.h>
+#include <linux/moduleparam.h>
+
 #include "arm_core_scaling.h"
+#include "mali_executor.h"
 #include "platform_pmm.h"
-#include "mali_pm.h"
-#include "mali_osk.h"
-#include "mt_reg_base.h"
 
+
+static int mali_core_scaling_enable = 0;
+extern unsigned int current_sample_utilization;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+#if defined(CONFIG_MALI_DVFS)
+int  mali_gpu_set_freq(int setting_clock_step);
+void mali_gpu_get_clock_info(struct mali_gpu_clock **data);
+int  mali_gpu_get_freq(void);
+#endif
+
+#if defined(CONFIG_ARCH_REALVIEW)
+static u32 mali_read_phys(u32 phys_addr);
+static void mali_write_phys(u32 phys_addr, u32 value);
+#endif
+
+#ifndef CONFIG_MALI_DT
 static void mali_platform_device_release(struct device *device);
-static int mali_pm_suspend(struct device *device);
-static int mali_pm_resume(struct device *device);
 
+#if defined(CONFIG_ARCH_VEXPRESS)
 
-static struct mali_gpu_device_data mali_gpu_data =
-{
-    // System memory
-    .shared_mem_size = 1024 * 1024 * 1024, /* 1GB */
-    // Framebuffer physical address, only for validation usage
-    .fb_start = 0x80000000,
-    .fb_size  = 0x80000000,
-    // DVFS
-    .utilization_interval = 200, /* ms */
-    .utilization_callback = mali_pmm_utilization_handler, /*<utilization function>,*/
+#if defined(CONFIG_ARM64)
+/* Juno + Mali-450 MP6 in V7 FPGA */
+static struct resource mali_gpu_resources_m450_mp6[] = {
+       MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
 };
 
-static struct resource mali_gpu_resources[] =
-{
-    MALI_GPU_RESOURCES_MALI450_MP4(
-                    IO_VIRT_TO_PHYS(MALI_BASE),
-                    MT_MFG_IRQ0_ID,
-                    MT_MFG_IRQ1_ID,
-                    MT_MFG_IRQ2_ID,
-                    MT_MFG_IRQ3_ID,
-                    MT_MFG_IRQ4_ID,
-                    MT_MFG_IRQ5_ID,
-                    MT_MFG_IRQ6_ID,
-                    MT_MFG_IRQ7_ID,
-                    MT_MFG_IRQ8_ID,
-                    MT_MFG_IRQ9_ID,
-                    MT_MFG_IRQ10_ID
-                )
+#else
+static struct resource mali_gpu_resources_m450_mp8[] = {
+       MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
 };
 
-static struct resource mali_gpu_resources_MP3[] =
-{
-    MALI_GPU_RESOURCES_MALI450_MP3(
-                    IO_VIRT_TO_PHYS(MALI_BASE),
-                    MT_MFG_IRQ0_ID,
-                    MT_MFG_IRQ1_ID,
-                    MT_MFG_IRQ2_ID,
-                    MT_MFG_IRQ3_ID,
-                    MT_MFG_IRQ4_ID,
-                    MT_MFG_IRQ5_ID,
-                    MT_MFG_IRQ6_ID,
-                    MT_MFG_IRQ7_ID,                   
-                    MT_MFG_IRQ10_ID
-                )
+static struct resource mali_gpu_resources_m450_mp6[] = {
+       MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
 };
 
-
-static struct resource mali_gpu_resources_MP2[] =
-{
-    MALI_GPU_RESOURCES_MALI450_MP2(
-                    IO_VIRT_TO_PHYS(MALI_BASE),
-                    MT_MFG_IRQ0_ID,
-                    MT_MFG_IRQ1_ID,
-                    MT_MFG_IRQ2_ID,
-                    MT_MFG_IRQ3_ID,
-                    MT_MFG_IRQ4_ID,
-                    MT_MFG_IRQ5_ID,
-                    MT_MFG_IRQ10_ID
-                )
+static struct resource mali_gpu_resources_m450_mp4[] = {
+       MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
 };
+#endif /* CONFIG_ARM64 */
 
+#elif defined(CONFIG_ARCH_REALVIEW)
 
-static struct dev_pm_ops mali_gpu_device_type_pm_ops =
-{
-    .suspend = mali_pm_suspend,
-    .resume  = mali_pm_resume, 
-    .freeze  = mali_pm_suspend, 
-    .thaw    = mali_pm_resume,   
-       .restore = mali_pm_resume,
-       
-#ifdef CONFIG_PM_RUNTIME
-       .runtime_suspend = mali_runtime_suspend,
-       .runtime_resume  = mali_runtime_resume,
-       .runtime_idle   = mali_runtime_idle,
-#endif
+static struct resource mali_gpu_resources_m300[] = {
+       MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1)
 };
 
-static struct device_type mali_gpu_device_device_type =
-{
-       .pm = &mali_gpu_device_type_pm_ops,
+static struct resource mali_gpu_resources_m400_mp1[] = {
+       MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1)
 };
 
-
-static struct platform_device mali_gpu_device =
-{
-   .name = MALI_GPU_NAME_UTGARD,
-   .id = 0,
-   .num_resources = ARRAY_SIZE(mali_gpu_resources),
-   .resource = (struct resource *)&mali_gpu_resources,
-   .dev.platform_data = &mali_gpu_data,
-   .dev.release = mali_platform_device_release,
-       .dev.coherent_dma_mask = DMA_BIT_MASK(32),    
-        /// Ideally .dev.pm_domain should be used instead, as this is the new framework designed
-        /// to control the power of devices.    
-       .dev.type = &mali_gpu_device_device_type /// We should probably use the pm_domain instead of type on newer kernels
+static struct resource mali_gpu_resources_m400_mp2[] = {
+       MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
 };
 
+#endif
+#endif
 
-extern u32 get_devinfo_with_index(u32 index);
-
-static u32 get_devinfo() {
-       /*TODO: replace this with get_devinfo_with_index*/
-    return *(volatile u32 *)0xf0206174;
-}
-static u32 get_gpuinfo() {
-       /*TODO: replace this with get_devinfo_with_index*/
-    return *(volatile u32 *)0xf0206040;
-}
+static struct mali_gpu_device_data mali_gpu_data = {
+#ifndef CONFIG_MALI_DT
+       .pmu_switch_delay = 0xFF, /* does not have to be this high on FPGA, but a delay is useful for testing */
+       .max_job_runtime = 60000, /* 60 seconds */
+#if defined(CONFIG_ARCH_VEXPRESS)
+       .shared_mem_size = 1024 * 1024 * 1024,  /* 1GB */
+#endif
+#endif
 
-#define MALI_REASSIGN_RESOURCE(device, X) \
-do {\
-       device->resource = (struct resource *)&(X);\
-       device->num_resources = ARRAY_SIZE((X));\
-}while(0)
-
-static void update_dev_info(struct platform_device * device ) {
-    u32 info = get_devinfo();
-       MALI_DEBUG_PRINT(1, ("devinfo %#x\n", info));
-
-    /*if(0x0 == (info & (0x1 << 31))) { t or b*/
-    /*T*/
-    u32 gpuinfo = get_gpuinfo();
-       MALI_DEBUG_PRINT(1, ("gpuinfo %#x\n", gpuinfo));
-    u32 pp = (gpuinfo & 0x60000) >> 17;
-    if(pp == 0x1) {
-               MALI_DEBUG_PRINT(1, ("Found devinfo of MP3 %s\n", __FUNCTION__));
-               MALI_REASSIGN_RESOURCE(device, mali_gpu_resources_MP3);
-    } else if(pp == 0x2 || pp == 0x3) {
-               MALI_DEBUG_PRINT(1, ("Found devinfo of MP2 %s, %d\n", __FUNCTION__, pp));
-               MALI_REASSIGN_RESOURCE(device, mali_gpu_resources_MP2);
-    } else {
-#ifdef MTK_NR_MALI_PP
-#if (MTK_NR_MALI_PP == 3)
-        MALI_DEBUG_PRINT(1, ("Mali MP3 %s (MTK_NR_MALI_PP)\n", __FUNCTION__));
-        MALI_REASSIGN_RESOURCE(device, mali_gpu_resources_MP3);            
-#elif (MTK_NR_MALI_PP == 2)
-        MALI_DEBUG_PRINT(1, ("Mali MP2 %s (MTK_NR_MALI_PP)\n", __FUNCTION__));
-        MALI_REASSIGN_RESOURCE(device, mali_gpu_resources_MP2);            
+#if defined(CONFIG_ARCH_REALVIEW)
+       .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
+       .dedicated_mem_size = 0x10000000, /* 256MB */
+#endif
+#if defined(CONFIG_ARM64)
+       .fb_start = 0x5f000000,
+       .fb_size = 0x91000000,
 #else
-               MALI_DEBUG_PRINT(1, ("Default MP4 %s, ignore cfg: %d\n", __FUNCTION__, MTK_NR_MALI_PP));
+       .fb_start = 0x80000000,
+       .fb_size = 0x80000000,
 #endif
-#else 
-        MALI_DEBUG_PRINT(1, ("Default MP4 %s\n", __FUNCTION__));
+       .control_interval = 200, /* 200ms */
+       .utilization_callback = mali_gpu_utilization_callback,
+#if defined(CONFIG_MALI_DVFS)
+       .get_clock_info = mali_gpu_get_clock_info,
+       .get_freq = mali_gpu_get_freq,
+       .set_freq = mali_gpu_set_freq,
+#else
+       .get_clock_info = NULL,
+       .get_freq = NULL,
+       .set_freq = NULL,
 #endif
-       }
-}
+};
+
+
+#ifndef CONFIG_MALI_DT
+static struct platform_device mali_gpu_device = {
+       .name = MALI_GPU_NAME_UTGARD,
+       .id = 0,
+       .dev.release = mali_platform_device_release,
+       .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask,
+       .dev.coherent_dma_mask = DMA_BIT_MASK(32),
 
+       .dev.platform_data = &mali_gpu_data,
+#if defined(CONFIG_ARM64)
+       .dev.archdata.dma_ops = &noncoherent_swiotlb_dma_ops,
+#endif
+};
 
-extern unsigned int get_max_DRAM_size (void);
 int mali_platform_device_register(void)
 {
-    int err = -1;
-    int num_pp_cores = 4; //TODO: Need specify if we are using diff config
-    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));
-    mali_gpu_data.shared_mem_size = get_max_DRAM_size();
-
-    update_dev_info(&mali_gpu_device);
-    
-    err = platform_device_register(&mali_gpu_device);
-                   
-    if (0 == err) 
-    {         
-        mali_pmm_init();
-        
-#ifdef CONFIG_PM_RUNTIME
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
-                               pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
-                               pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
-#endif
-                               pm_runtime_enable(&(mali_gpu_device.dev));
+       int err = -1;
+       int num_pp_cores = 0;
+#if defined(CONFIG_ARCH_REALVIEW)
+       u32 m400_gp_version;
 #endif
 
-#if defined(__MALI_CORE_SCALING_ENABLE__)
-        mali_core_scaling_init(num_pp_cores);
-#endif        
-        return 0;
-    }
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+       /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
 
-    MALI_DEBUG_PRINT(1, ("%s err=%d\n",__FUNCTION__, err));
+#if defined(CONFIG_ARM64)
+       if (mali_read_phys(0x6F000000) == 0x40601450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+       }
+#else
+       if (mali_read_phys(0xFC000000) == 0x00000450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+               num_pp_cores = 8;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+       } else if (mali_read_phys(0xFC000000) == 0x40600450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+       } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+               num_pp_cores = 4;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+               mali_gpu_device.resource = mali_gpu_resources_m450_mp4;
+       }
+#endif /* CONFIG_ARM64 */
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+       m400_gp_version = mali_read_phys(0xC000006C);
+       if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+               num_pp_cores = 1;
+               mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m300);
+               mali_gpu_device.resource = mali_gpu_resources_m300;
+               mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+       } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+               u32 fpga_fw_version = mali_read_phys(0xC0010000);
+               if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+                       /* Mali-400 MP1 r1p0 or r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+                       num_pp_cores = 1;
+                       mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp1);
+                       mali_gpu_device.resource = mali_gpu_resources_m400_mp1;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               } else if (fpga_fw_version == 0x130C000F) {
+                       /* Mali-400 MP2 r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+                       num_pp_cores = 2;
+                       mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp2);
+                       mali_gpu_device.resource = mali_gpu_resources_m400_mp2;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               }
+       }
+
+#endif
+
+       /* Register the platform device */
+       err = platform_device_register(&mali_gpu_device);
+       if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+               pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+               pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+               pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+               MALI_DEBUG_ASSERT(0 < num_pp_cores);
+               mali_core_scaling_init(num_pp_cores);
 
-    platform_device_unregister(&mali_gpu_device);
+               return 0;
+       }
 
-    return err;
+       return err;
 }
 
 void mali_platform_device_unregister(void)
 {
-    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));    
-    
-#if defined(__MALI_CORE_SCALING_ENABLE__)    
-    mali_core_scaling_term();
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+       mali_core_scaling_term();
+       platform_device_unregister(&mali_gpu_device);
+
+       platform_device_put(&mali_gpu_device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+       mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
 #endif
-    
-    mali_pmm_deinit();
-    platform_device_unregister(&mali_gpu_device);
 }
 
 static void mali_platform_device_release(struct device *device)
 {
-    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
 }
 
-static int mali_pm_suspend(struct device *device)
-{
-    int ret = 0;
-
-    MALI_DEBUG_PRINT(1, ("Mali PM:%s\n", __FUNCTION__));
-
-    if (NULL != device->driver &&
-        NULL != device->driver->pm &&
-        NULL != device->driver->pm->suspend)
-    {
-        /* Need to notify Mali driver about this event */
-        ret = device->driver->pm->suspend(device);
-    }
-   
-    _mali_osk_pm_delete_callback_timer();
-    mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
-
-    return ret;
-}
+#else /* CONFIG_MALI_DT */
 
-static int mali_pm_resume(struct device *device)
+static int mali_pm_suspend(struct device *device)
 {
-    int ret = 0;
+       int ret = 0;
 
-    MALI_DEBUG_PRINT(1, ("Mali PM: %s\n", __FUNCTION__));
+       MALI_DEBUG_PRINT(3, ("Mali PM:%s\n", __func__));
 
-    mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+       if (NULL != device->driver && NULL != device->driver->pm
+           && NULL != device->driver->pm->suspend) {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->suspend(device);
+       }
 
-    if (NULL != device->driver &&
-        NULL != device->driver->pm &&
-        NULL != device->driver->pm->resume)
-    {
-        /* Need to notify Mali driver about this event */
-        ret = device->driver->pm->resume(device);
-    }
+       /* _mali_osk_pm_delete_callback_timer();*/
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_DEEP_SLEEP);
 
-    return ret;
+       return ret;
 }
 
-
-
-#if 0//because not used
-static int mali_pm_freeze(struct device *device)
+static int mali_pm_resume(struct device *device)
 {
-    int ret = 0;
-    
-    MALI_DEBUG_PRINT(1, ("Mali PM: %s\n", __FUNCTION__));
-    
-    if (NULL != device->driver &&
-        NULL != device->driver->pm &&
-        NULL != device->driver->pm->freeze)
-    {
-        /* Need to notify Mali driver about this event */
-        ret = device->driver->pm->freeze(device);
-    }
-
-    return ret;
-}
+       int ret = 0;
 
-static int mali_pm_thaw(struct device *device)
-{
-    int ret = 0;
+       MALI_DEBUG_PRINT(3, ("Mali PM: %s\n", __func__));
 
-    MALI_DEBUG_PRINT(1, ("Mali PM: %s\n", __FUNCTION__));
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_ON);
 
-    if (NULL != device->driver &&
-        NULL != device->driver->pm &&
-        NULL != device->driver->pm->thaw)
-    {
-        /* Need to notify Mali driver about this event */
-        ret = device->driver->pm->thaw(device);
-    }
+       if (NULL != device->driver && NULL != device->driver->pm
+           && NULL != device->driver->pm->resume) {
+               /* Need to notify Mali driver about this event */
+               ret = device->driver->pm->resume(device);
+       }
 
-    return ret;
+       return ret;
 }
-#endif
 
 #ifdef CONFIG_PM_RUNTIME
 static int mali_runtime_suspend(struct device *device)
 {
-       int ret = 0;
+       int ret = 0;
 
        MALI_DEBUG_PRINT(4, ("mali_runtime_suspend() called\n"));
 
-       if (NULL != device->driver &&
-           NULL != device->driver->pm &&
-           NULL != device->driver->pm->runtime_suspend)
-       {
+       if (NULL != device->driver && NULL != device->driver->pm
+           && NULL != device->driver->pm->runtime_suspend) {
                /* Need to notify Mali driver about this event */
                ret = device->driver->pm->runtime_suspend(device);
        }
 
-       mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_LIGHT_SLEEP);
 
        return ret;
 }
@@ -326,12 +295,10 @@ static int mali_runtime_resume(struct device *device)
 
        MALI_DEBUG_PRINT(4, ("mali_runtime_resume() called\n"));
 
-       mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+       mali_platform_power_mode_change(device, MALI_POWER_MODE_ON);
 
-       if (NULL != device->driver &&
-           NULL != device->driver->pm &&
-           NULL != device->driver->pm->runtime_resume)
-       {
+       if (NULL != device->driver && NULL != device->driver->pm
+           && NULL != device->driver->pm->runtime_resume) {
                /* Need to notify Mali driver about this event */
                ret = device->driver->pm->runtime_resume(device);
        }
@@ -343,20 +310,209 @@ static int mali_runtime_idle(struct device *device)
 {
        MALI_DEBUG_PRINT(4, ("mali_runtime_idle() called\n"));
 
-       if (NULL != device->driver &&
-           NULL != device->driver->pm &&
-           NULL != device->driver->pm->runtime_idle)
-       {
+       if (NULL != device->driver && NULL != device->driver->pm
+           && NULL != device->driver->pm->runtime_idle) {
                /* Need to notify Mali driver about this event */
                int ret = device->driver->pm->runtime_idle(device);
                if (0 != ret)
-               {
                        return ret;
-               }
        }
 
        pm_runtime_suspend(device);
 
        return 0;
 }
-#endif /// CONFIG_PM_RUNTIME
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops mali_gpu_device_type_pm_ops = {
+       .suspend = mali_pm_suspend,
+       .resume = mali_pm_resume,
+       .freeze = mali_pm_suspend,
+       .thaw = mali_pm_resume,
+       .restore = mali_pm_resume,
+
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = mali_runtime_suspend,
+       .runtime_resume = mali_runtime_resume,
+       .runtime_idle = mali_runtime_idle,
+#endif
+};
+
+static struct device_type mali_gpu_device_device_type = {
+       .pm = &mali_gpu_device_type_pm_ops,
+};
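+
+/*
+ * The PM callbacks above are attached through a device_type rather than a
+ * driver's pm field; mali_platform_device_init() below points
+ * device->dev.type at this type so the DT-instantiated platform device
+ * picks them up.
+ */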
+
+int mali_platform_device_init(struct platform_device *device)
+{
+       int num_pp_cores = 4;
+       int err = -1;
+#if defined(CONFIG_ARCH_REALVIEW)
+       u32 m400_gp_version;
+#endif
+
+       /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+       if (mali_read_phys(0x6F000000) == 0x40601450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+               num_pp_cores = 6;
+       }
+#else
+       if (mali_read_phys(0xFC000000) == 0x00000450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+               num_pp_cores = 8;
+       } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+               num_pp_cores = 4;
+       }
+#endif
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+       m400_gp_version = mali_read_phys(0xC000006C);
+       if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+               MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+               num_pp_cores = 1;
+               mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+       } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+               u32 fpga_fw_version = mali_read_phys(0xC0010000);
+               if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+                       /* Mali-400 MP1 r1p0 or r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+                       num_pp_cores = 1;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               } else if (fpga_fw_version == 0x130C000F) {
+                       /* Mali-400 MP2 r1p1 */
+                       MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+                       num_pp_cores = 2;
+                       mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+               }
+       }
+#endif
+
+       if (mali_pmm_init(device))
+               return err;
+
+       device->dev.type = &mali_gpu_device_device_type;
+
+       err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+
+       if (0 == err) {
+
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+               pm_runtime_set_autosuspend_delay(&(device->dev), 2000);
+               pm_runtime_use_autosuspend(&(device->dev));
+#endif
+               pm_runtime_enable(&(device->dev));
+#endif
+               MALI_DEBUG_ASSERT(0 < num_pp_cores);
+               mali_core_scaling_init(num_pp_cores);
+       }
+
+       return err;
+}
+
+int mali_platform_device_deinit(struct platform_device *device)
+{
+       /*MALI_IGNORE(device);*/
+
+       MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+       mali_core_scaling_term();
+
+       mali_pmm_deinit(device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+       mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+
+       return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
+#if defined(CONFIG_ARCH_REALVIEW)
+static u32 mali_read_phys(u32 phys_addr)
+{
+       u32 phys_addr_page = phys_addr & 0xFFFFE000;
+       u32 phys_offset    = phys_addr & 0x00001FFF;
+       u32 map_size       = phys_offset + sizeof(u32);
+       u32 ret = 0xDEADBEEF;
+       void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+       if (NULL != mem_mapped) {
+               ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
+               iounmap(mem_mapped);
+       }
+
+       return ret;
+}
+
+static void mali_write_phys(u32 phys_addr, u32 value)
+{
+       u32 phys_addr_page = phys_addr & 0xFFFFE000;
+       u32 phys_offset    = phys_addr & 0x00001FFF;
+       u32 map_size       = phys_offset + sizeof(u32);
+       void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+       if (NULL != mem_mapped) {
+               iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
+               iounmap(mem_mapped);
+       }
+}
+#endif
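+
+/*
+ * Worked example for the mapping arithmetic above (illustration only):
+ * for phys_addr 0xC000006C, phys_addr_page = 0xC0000000 (phys_addr &
+ * 0xFFFFE000), phys_offset = 0x6C, and map_size = 0x6C + 4 = 0x70, so one
+ * 8 KB-aligned mapping covers the whole 32-bit register access.
+ */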
+
+static int param_set_core_scaling(const char *val, const struct kernel_param *kp)
+{
+       int ret = param_set_int(val, kp);
+
+       if (1 == mali_core_scaling_enable) {
+               mali_core_scaling_sync(mali_executor_get_num_cores_enabled());
+       }
+       return ret;
+}
+
+static struct kernel_param_ops param_ops_core_scaling = {
+       .set = param_set_core_scaling,
+       .get = param_get_int,
+};
+
+module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
+MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy");
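+
+/*
+ * Usage sketch (the sysfs path assumes this code is built as a module
+ * named "mali"; the path itself is not taken from this patch):
+ *
+ *   echo 1 > /sys/module/mali/parameters/mali_core_scaling_enable
+ *
+ * param_set_core_scaling() above then re-syncs the policy with the number
+ * of PP cores the executor currently has enabled.
+ */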
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+       if (1 == mali_core_scaling_enable) {
+               mali_core_scaling_update(data);
+       }
+       current_sample_utilization = (unsigned int)data->utilization_gpu;
+}
+
+#if defined(CONFIG_MALI_DVFS)
+int mali_gpu_set_freq(int setting_clock_step)
+{
+       MALI_DEBUG_PRINT(1, ("mali_gpu_set_freq : incomplete\n"));
+
+       return 0; /* stub: no frequency step table is wired up yet */
+}
+
+int mali_gpu_get_freq(void)
+{
+       /* return clock_step */
+       MALI_DEBUG_PRINT(1, ("mali_gpu_get_freq : incomplete\n"));
+
+       return 0;
+}
+
+static struct mali_gpu_clk_item clk_item[] = { {455, 1} };
+static struct mali_gpu_clock mali_clock_info = {
+       .item = &clk_item[0],
+       .num_of_steps = 1,
+};
+
+void mali_gpu_get_clock_info(struct mali_gpu_clock **data)
+{
+       MALI_DEBUG_PRINT(1, ("mali_gpu_get_clock_info : incomplete\n"));
+       *data = &mali_clock_info;
+}
+#endif
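+
+/*
+ * A hypothetical two-step clock table would look like the sketch below
+ * (frequencies and voltage indices illustrative only, not taken from this
+ * platform):
+ *
+ *   static struct mali_gpu_clk_item clk_item[] = { {455, 1}, {299, 1} };
+ *   static struct mali_gpu_clock mali_clock_info = {
+ *           .item = &clk_item[0],
+ *           .num_of_steps = 2,
+ *   };
+ *
+ * mali_gpu_set_freq()/mali_gpu_get_freq() would then translate a
+ * clock_step index into one of these entries.
+ */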
index 5238c5f65ef70d67048c78199358f72a26475d6c..25c1715cc767c3fe62d6daa303f6355528938110 100644 (file)
@@ -1,3 +1,4 @@
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/random.h>
index 7779bec07e55b3ae1d23d7dac79f8c1b4fb1af94..d75b6eee52b708489734ce7ed51774f4d32e611e 100644 (file)
+
 #include <linux/mali/mali_utgard.h>
 #include "mali_kernel_common.h"
 #include "mali_osk.h"
 #include "platform_pmm.h"
-#include "mach/mt_gpufreq.h"
+#include <linux/kernel.h>
 #include <asm/atomic.h>
 #include "arm_core_scaling.h"
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
 
 #if defined(CONFIG_MALI400_PROFILING)
 #include "mali_osk_profiling.h"
 #endif
 
+#ifdef CONFIG_MALI_DT
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk-private.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include "mt-plat/mt_smi.h"
+#include <linux/proc_fs.h>
+#else
+#include "mach/mt_gpufreq.h"
+#endif
 
-extern unsigned long (*mtk_thermal_get_gpu_loading_fp)(void);
-extern unsigned int (*mtk_get_gpu_loading_fp)(void);
+/*
+extern unsigned long (*mtk_thermal_get_gpu_loading_fp) (void);
+extern unsigned long (*mtk_get_gpu_loading_fp) (void);
+*/
 
 static int bPoweroff;
+unsigned int current_sample_utilization;
 
-/// #define __POWER_CLK_CTRL_SYNC__
-/// For MFG sub-system clock control API
-#include <mach/mt_clkmgr.h>
-#include <linux/kernel.h>
+extern u32 get_devinfo_with_index(u32 index);
+static int _need_univpll;
+
+#ifdef CONFIG_MALI_DT
+
+/* MFG begin
+ * GPU controller.
+ *
+ * If the GPU power domain ever needs to be enabled after disp, the
+ * mfg_enable_gpu flow will be split into two functions such as
+ * mfg_prepare/mfg_enable.
+ *
+ * There is currently no lock for mfg; the Mali driver takes care of locking.
+ */
+
+#define MFG_CG_CON 0x0
+#define MFG_CG_SET 0x4
+#define MFG_CG_CLR 0x8
+#define MFG_DEBUG_SEL 0x180
+#define MFG_DEBUG_STAT 0x184
+#define MFG_SPD_MASK 0x80000
+#define MFG_GPU_QUAL_MASK 0x3
+
+#define MFG_READ32(r) __raw_readl((void __iomem *)((unsigned long)mfg_start + (r)))
+#define MFG_WRITE32(v, r) __raw_writel((v), (void __iomem *)((unsigned long)mfg_start + (r)))
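+
+/*
+ * Gating protocol sketch, as used by mfg_enable_gpu()/mfg_disable_gpu()
+ * below: writing bit 0 to MFG_CG_SET gates the G3D clock, writing it to
+ * MFG_CG_CLR ungates it, and MFG_CG_CON reads back 0 once nothing is
+ * gated any more.
+ */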
 
-static unsigned int current_sample_utilization = 0;
+static struct platform_device *mfg_dev;
+static void __iomem *mfg_start;
+static void __iomem *scp_start;
 
-#if defined(__MALI_CORE_SCALING_ENABLE__)
-   extern int mali_core_scaling_enable;
+#ifdef CONFIG_OF
+static const struct of_device_id mfg_dt_ids[] = {
+       {.compatible = "mediatek,mt8127-mfg"},
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, mfg_dt_ids);
 #endif
 
-static DEFINE_SPINLOCK(mali_pwr_lock);
+static int mfg_device_probe(struct platform_device *pdev)
+{
+       /* Make sure disp pm is ready to operate. */
+       if (!mtk_smi_larb_get_base(0)) {
+               pr_warn("MFG probe deferred until disp domain is ready\n");
+               mfg_dev = NULL;
+               return -EPROBE_DEFER;
+       }
+       pr_info("MFG domain ready\n");
 
-#define mfg_pwr_lock(flags) \
-do { \
-    spin_lock_irqsave(&mali_pwr_lock, flags); \
-} while(0)
+       mfg_start = of_iomap(pdev->dev.of_node, 0);
+       if (IS_ERR_OR_NULL(mfg_start)) {
+               mfg_start = NULL;
+               goto error_out;
+       }
+       pr_info("MFG start is mapped %p\n", mfg_start);
 
-#define mfg_pwr_unlock(flags) \
-do { \
-    spin_unlock_irqrestore(&mali_pwr_lock, flags); \
-} while(0)
+       pm_runtime_set_autosuspend_delay(&(pdev->dev), 300);
+       pm_runtime_use_autosuspend(&(pdev->dev));
 
-extern u32 get_devinfo_with_index(u32 index);
-static u32 check_need_univpll() {
-    u32 info = *(volatile u32 *)0xf0206174;
-       /*get_devinfo_with_index (15);*/
-    /*if(0x0 == (info & (0x1 << 31))) { t or b?*/
-        /*T*/
-       u32 devinfo = *(volatile u32* )0xf0206040;/*get_devinfo_with_index(3);*/
-    if(devinfo & 0x80000) {
-        MALI_DEBUG_PRINT(1, ("GPU use univ with devinfo 0x%x\n", devinfo));
-        return 1;
-    } else {
-#ifdef MTK_MALI_UNIV
-        MALI_DEBUG_PRINT(1, ("GPU use univ with MTK_MALI_UNIV\n"));
-        return 1;
-#else
-        return 0;
-#endif  
-    }
-    
+       pm_runtime_enable(&pdev->dev);
+
+       {
+               struct device_node *node;
+               static const struct of_device_id scp_ids[] = {
+                       {.compatible = "mediatek,mt8127-scpsys"},
+                       { /* sentinel */ }
+               };
+
+               node = of_find_matching_node(NULL, scp_ids);
+               if (node)
+                       scp_start = of_iomap(node, 0);
+               pr_info("MFG scp_start is mapped %p\n", scp_start);
+       }
+       mfg_dev = pdev;
+       pr_info("MFG device probed done\n");
+       return 0;
+error_out:
+       if (mfg_start)
+               iounmap(mfg_start);
+       if (scp_start)
+               iounmap(scp_start);
+
+       return -1;
+}
+
+static int mfg_device_remove(struct platform_device *pdev)
+{
+       pm_runtime_disable(&pdev->dev);
+
+       if (mfg_start)
+               iounmap(mfg_start);
+       if (scp_start)
+               iounmap(scp_start);
+
+       return 0;
 }
-static int _need_univpll = 0;
-
-void mali_pmm_init(void)
-{
-    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));
-
-       _need_univpll = check_need_univpll();
-       
-    MALI_DEBUG_PRINT(1, ("need univ src pll %d\n", _need_univpll));
-
-    mtk_thermal_get_gpu_loading_fp = gpu_get_current_utilization;
-    mtk_get_gpu_loading_fp = gpu_get_current_utilization;
-       
-       unsigned long flags;
-
-    /* Because clkmgr may do 'default on' for some clock.
-       We check the clock state on init and set power state atomic.
-    */
-    mfg_pwr_lock(flags);
-    MALI_DEBUG_PRINT(1, ("MFG G3D init enable if it is on\n"));
-    if(clock_is_on(MT_CG_MFG_G3D)) {
-        MALI_DEBUG_PRINT(1, ("MFG G3D default on\n"));
-        atomic_set((atomic_t *)&bPoweroff, 0);
-        /* Need call enable first for 'default on' clocks. 
-         * Canbe removed if clkmgr remove this requirement.
-         */
-        enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
-        enable_clock(MT_CG_MFG_G3D, "MFG");
-    } else {
-        MALI_DEBUG_PRINT(1, ("MFG G3D init default off\n"));
-        atomic_set((atomic_t *)&bPoweroff, 1);
-    }
-    mfg_pwr_unlock(flags);
-    mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+
+static struct platform_driver mtk_mfg_driver = {
+       .probe = mfg_device_probe,
+       .remove = mfg_device_remove,
+       .driver = {
+                  .name = "mfg",
+                  .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(mfg_dt_ids),
+                  },
+};
+
+#define MFG_DUMP_FOR(base, off) \
+do {\
+       u32 val;\
+       val = __raw_readl(base + off);\
+       pr_info("pwr_dump %s [%#x]: 0x%x\n", #base, (u32)off, val); \
+} while (0)
+
+#define DEBUG_MFG_STAT \
+do {\
+       u32 con = 0xDEAC;\
+       con = MFG_READ32(MFG_CG_CON);\
+       pr_debug("MFG %s #%d CON: 0x%x\n", __func__, __LINE__, con);    \
+} while (0)
+
+static int mfg_enable_gpu(void)
+{
+       int ret = -1, i = 10;
+       u32 con;
+       if (mfg_start == NULL)
+               return ret;
+       ret = pm_runtime_get_sync(&mfg_dev->dev);
+       if (ret < 0) {
+               /*
+               pm_runtime_enable(&mfg_dev->dev);
+               ret = pm_runtime_get_sync(&mfg_dev->dev);
+               */
+               pr_warn("MFG %s #%d pm_runtime_get_sync on DISP failed [%d]\n", __func__, __LINE__, ret);
+       }
+       ret = mtk_smi_larb_clock_on(0, false);
+
+       DEBUG_MFG_STAT;
+       do {
+               MFG_WRITE32(0x1, MFG_CG_CLR);
+               con = MFG_READ32(MFG_CG_CON);
+               if (con == 0)
+                       break;
+               pr_warn("MFG MFG_CG_CON[0x%x]", con);
+       } while (i--);
+       DEBUG_MFG_STAT;
+
+       return ret;
 }
 
-void mali_pmm_deinit(void)
+static void mfg_disable_gpu(void)
 {
-    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));
+       if (mfg_start == NULL)
+               return;
 
-    mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+       DEBUG_MFG_STAT;
+       MFG_WRITE32(0x1, MFG_CG_SET);
+       DEBUG_MFG_STAT;
+       mtk_smi_larb_clock_off(0, false);
+       pm_runtime_mark_last_busy(&mfg_dev->dev);
+       pm_runtime_put_autosuspend(&mfg_dev->dev);
 }
 
-/* this function will be called periodically with sampling period 200ms~1000ms */
-void mali_pmm_utilization_handler(struct mali_gpu_utilization_data *data)
+static int __init mfg_driver_init(void)
 {
-   current_sample_utilization = (unsigned int )data->utilization_gpu;
-   
-   MALI_DEBUG_PRINT(4, ("%s: GPU utilization=%d\n", __FUNCTION__, current_sample_utilization));
-       
-#if defined(__MALI_CORE_SCALING_ENABLE__)      
-       if (1 == mali_core_scaling_enable) {
-               mali_core_scaling_update(data);
+       return platform_driver_register(&mtk_mfg_driver);
+}
+
+bool mtk_mfg_is_ready(void)
+{
+       return (mfg_dev != NULL && mfg_start != NULL);
+}
+
+/* We need mfg to be probed before the GPU driver. */
+late_initcall(mfg_driver_init);
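+
+/*
+ * late_initcall() registers the mfg driver after built-in device
+ * initcalls; mfg_device_probe() above additionally defers with
+ * -EPROBE_DEFER until the disp domain is up, and mtk_mfg_is_ready() lets
+ * callers confirm the MFG register space was actually mapped.
+ */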
+
+/* MFG end */
+
+struct _mfg_base {
+       void __iomem *g3d_base;
+       struct clk *mm_smi;
+       struct clk *mfg_pll;
+       struct clk *mfg_sel;
+       /*struct regulator *vdd_g3d;*/
+};
+
+static struct _mfg_base mfg_base;
+
+#define REG_MFG_G3D BIT(0)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
+
+int mali_mfgsys_init(struct platform_device *device)
+{
+       int err = 0;
+       struct clk *parent;
+       unsigned long freq;
+
+       mfg_base.g3d_base = mfg_start;
+
+       mfg_base.mm_smi = devm_clk_get(&device->dev, "mm_smi");
+       if (IS_ERR(mfg_base.mm_smi)) {
+               err = PTR_ERR(mfg_base.mm_smi);
+               dev_err(&device->dev, "devm_clk_get mm_smi failed\n");
+               goto err_iounmap_reg_base;
+       }
+       if (!_need_univpll) {
+               mfg_base.mfg_pll = devm_clk_get(&device->dev, "mfg_pll");
+               if (IS_ERR(mfg_base.mfg_pll)) {
+                       err = PTR_ERR(mfg_base.mfg_pll);
+                       dev_err(&device->dev, "devm_clk_get mfg_pll failed\n");
+                       goto err_iounmap_reg_base;
+               }
+       } else {
+               mfg_base.mfg_pll = devm_clk_get(&device->dev, "mfg_pll_univ");
+               if (IS_ERR(mfg_base.mfg_pll)) {
+                       err = PTR_ERR(mfg_base.mfg_pll);
+                       dev_err(&device->dev, "devm_clk_get mfg_pll_univ failed\n");
+                       goto err_iounmap_reg_base;
+               }
+       }
+       mfg_base.mfg_sel = devm_clk_get(&device->dev, "mfg_sel");
+       if (IS_ERR(mfg_base.mfg_sel)) {
+               err = PTR_ERR(mfg_base.mfg_sel);
+               dev_err(&device->dev, "devm_clk_get mfg_sel failed\n");
+               goto err_iounmap_reg_base;
+       }
+       clk_prepare_enable(mfg_base.mfg_sel);
+
+       err = clk_set_parent(mfg_base.mfg_sel, mfg_base.mfg_pll);
+       if (err != 0) {
+               dev_err(&device->dev, "failed to clk_set_parent\n");
+               goto err_iounmap_reg_base;
+       }
+       parent = clk_get_parent(mfg_base.mfg_sel);
+       if (!IS_ERR_OR_NULL(parent)) {
+               pr_info("MFG is now selected to %s\n", parent->name);
+               freq = clk_get_rate(parent);
+               pr_info("MFG parent rate %lu\n", freq);
+               /* Don't set rate here, gpufreq will do this */
+       } else {
+               pr_err("Failed to select mfg\n");
+       }
+
+       /*
+       clk_disable_unprepare(mfg_base.mfg_sel);
+
+       mfg_base.vdd_g3d = devm_regulator_get(&device->dev, "vdd_g3d");
+       if (IS_ERR(mfg_base.vdd_g3d)) {
+               err = PTR_ERR(mfg_base.vdd_g3d);
+               goto err_iounmap_reg_base;
        }
-#endif 
+
+       err = regulator_enable(mfg_base.vdd_g3d);
+       if (err != 0) {
+               dev_err(&device->dev, "failed to enable regulator vdd_g3d\n");
+               goto err_iounmap_reg_base;
+       }
+       */
+
+       return 0;
+
+err_iounmap_reg_base:
+
+       return err;
 }
 
-unsigned long gpu_get_current_utilization(void)
+void mali_mfgsys_deinit(struct platform_device *device)
 {
-    return (current_sample_utilization * 100)/256;
+       pm_runtime_disable(&device->dev);
+       /*regulator_disable(mfg_base.vdd_g3d);*/
 }
+
+void dump_clk_state(void)
+{
+       MALI_DEBUG_PRINT(2, ("mali platform_mmt dump_clk_state smi_ref[%d], smi_enabled[%d]\n",
+               __clk_get_enable_count(mfg_base.mm_smi), __clk_is_enabled(mfg_base.mm_smi)));
+       MALI_DEBUG_PRINT(2, ("MFG %s #%d MFG_DEBUG_SEL: 0x%x\n", __func__, __LINE__, MFG_READ32(MFG_DEBUG_SEL)));
+       MALI_DEBUG_PRINT(2, ("MFG %s #%d MFG_CG_CON: 0x%x\n", __func__, __LINE__, MFG_READ32(MFG_CG_CON)));
+       if (scp_start) {
+               MFG_DUMP_FOR(scp_start, 0x060c); /*SPM_PWR_STATUS*/
+               MFG_DUMP_FOR(scp_start, 0x0610); /*SPM_PWR_STATUS_2ND*/
+       }
+       mali_platform_power_mode_change(NULL, MALI_POWER_MODE_ON);
+}
+int mali_clk_enable(struct device *device)
+{
+       int ret;
+       /*clk_prepare_enable(mfg_base.mfg_sel);*/
 
+       ret = mfg_enable_gpu();
 
+       MALI_DEBUG_PRINT(3, ("MFG %s #%d MFG_DEBUG_SEL: 0x%x\n", __func__, __LINE__, MFG_READ32(MFG_DEBUG_SEL)));
+       MALI_DEBUG_PRINT(3, ("MFG %s #%d MFG_CG_CON: 0x%x\n", __func__, __LINE__, MFG_READ32(MFG_CG_CON)));
+       MALI_DEBUG_PRINT(2, ("mali_clk_enable![%d]\n", ret));
+
+       return 0;
+}
 
-void g3d_power_domain_control(int bpower_on)
+int mali_clk_disable(struct device *device)
 {
-   if (bpower_on)
-   {
-      MALI_DEBUG_PRINT(2,("enable_subsys \n"));
-      //enable_subsys(SYS_MFG, "G3D_MFG");
-   }
-   else
-   {
-      MALI_DEBUG_PRINT(2,("disable_subsys_force \n"));
-      //disable_subsys(SYS_MFG, "G3D_MFG");
-   }
+       mfg_disable_gpu();
+       /*clk_disable_unprepare(mfg_base.mfg_sel);*/
+       MALI_DEBUG_PRINT(2, ("mali_clk_disable done\n"));
+
+       return 0;
+}
+#endif
+
+int mali_pmm_init(struct platform_device *device)
+{
+       int err = 0;
+       u32 idx = 0;
+
+       MALI_DEBUG_PRINT(1, ("%s\n", __func__));
+       idx = get_devinfo_with_index(3);
+       if (idx & MFG_SPD_MASK)
+               _need_univpll = 1;
+       else
+               _need_univpll = 0;
+       MALI_DEBUG_PRINT(2, ("need univ src pll idx 0x%x %d\n", idx, _need_univpll));
+
+       /* clkmgr may leave some clocks 'default on'.
+        * Check the clock state at init and set the power state atomically.
+        */
+
+       MALI_DEBUG_PRINT(1, ("MFG G3D init: enable if it is on\n"));
+#ifndef CONFIG_MALI_DT
+       mtk_thermal_get_gpu_loading_fp = gpu_get_current_utilization;
+       mtk_get_gpu_loading_fp = gpu_get_current_utilization;
+       if (clock_is_on(MT_CG_MFG_G3D)) {
+               MALI_DEBUG_PRINT(1, ("MFG G3D default on\n"));
+               atomic_set((atomic_t *)&bPoweroff, 0);
+               /* Need to call enable first for 'default on' clocks.
+                * Can be removed if clkmgr drops this requirement.
+                */
+               enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
+               enable_clock(MT_CG_MFG_G3D, "MFG");
+       } else {
+               MALI_DEBUG_PRINT(1, ("MFG G3D init default off\n"));
+               atomic_set((atomic_t *)&bPoweroff, 1);
+       }
+#else
+       err = mali_mfgsys_init(device);
+       if (err)
+               return err;
+       atomic_set((atomic_t *)&bPoweroff, 1);
+#endif
+       mali_platform_power_mode_change(&(device->dev), MALI_POWER_MODE_ON);
+
+       return err;
 }
 
+void mali_pmm_deinit(struct platform_device *device)
+{
+       MALI_DEBUG_PRINT(1, ("%s\n", __func__));
+
+       mali_platform_power_mode_change(&device->dev, MALI_POWER_MODE_DEEP_SLEEP);
+       mali_mfgsys_deinit(device);
+}
 
+unsigned int gpu_get_current_utilization(void)
+{
+       return (current_sample_utilization * 100) / 256;
+}
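+
+/*
+ * Example: the DDK reports utilization on a 0..256 scale, so a raw sample
+ * of 128 is returned by this helper as (128 * 100) / 256 = 50 (percent).
+ */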
 
-void mali_platform_power_mode_change(mali_power_mode power_mode)
-{
-   unsigned long flags;
-   switch (power_mode)
-   {
-      case MALI_POWER_MODE_ON:
-         mfg_pwr_lock(flags);
-         MALI_DEBUG_PRINT(3, ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
-                              atomic_read((atomic_t *)&bPoweroff) ? "powering on" : "already on"));
-         if (atomic_read((atomic_t *)&bPoweroff) == 1)
-         {
-            /*Leave this to undepend ref count of clkmgr*/
-            if (!clock_is_on(MT_CG_MFG_G3D))
-            {
-                       MALI_DEBUG_PRINT(3,("MFG enable_clock \n"));
-                               if(_need_univpll) {
-                                       enable_pll(UNIVPLL, "GPU"); 
+void mali_platform_power_mode_change(struct device *device,
+                                    mali_power_mode power_mode)
+{
+       switch (power_mode) {
+       case MALI_POWER_MODE_ON:
+               MALI_DEBUG_PRINT(3,
+                                ("Mali platform: Got MALI_POWER_MODE_ON event, %s\n",
+                                 atomic_read((atomic_t *)&bPoweroff) ?
+                                 "powering on" : "already on"));
+               if (atomic_read((atomic_t *)&bPoweroff) == 1) {
+                       /*Leave this to undepend ref count of clkmgr */
+                       #ifndef CONFIG_MALI_DT
+                       if (!clock_is_on(MT_CG_MFG_G3D)) {
+                               MALI_DEBUG_PRINT(3, ("MFG enable_clock\n"));
+                               if (_need_univpll) {
+                                       enable_pll(UNIVPLL, "GPU");
                                }
-                enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
-                enable_clock(MT_CG_MFG_G3D, "MFG");
-                               if(_need_univpll) {
+                               enable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
+                               enable_clock(MT_CG_MFG_G3D, "MFG");
+                               if (_need_univpll) {
                                        clkmux_sel(MT_MUX_MFG, 6, "GPU");
                                }
-            }
-
+                               atomic_set((atomic_t *)&bPoweroff, 0);
+                       }
+                       #else
+                       if (!mali_clk_enable(device))
+                               atomic_set((atomic_t *)&bPoweroff, 0);
+                       #endif
 #if defined(CONFIG_MALI400_PROFILING)
-            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                  MALI_PROFILING_EVENT_CHANNEL_GPU |
-                  MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, 500,
-                  1200/1000, 0, 0, 0);
+                       _mali_osk_profiling_add_event
+                           (MALI_PROFILING_EVENT_TYPE_SINGLE |
+                            MALI_PROFILING_EVENT_CHANNEL_GPU |
+                            MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                            500, 1200 / 1000, 0, 0, 0);
 
 #endif
-            atomic_set((atomic_t *)&bPoweroff, 0);
-         }
-         mfg_pwr_unlock(flags);
-         break;
-      case MALI_POWER_MODE_LIGHT_SLEEP:
-      case MALI_POWER_MODE_DEEP_SLEEP:
-
-         mfg_pwr_lock(flags);
-         MALI_DEBUG_PRINT(3, ("Mali platform: Got %s event, %s\n", power_mode ==
-                  MALI_POWER_MODE_LIGHT_SLEEP ?  "MALI_POWER_MODE_LIGHT_SLEEP" :
-                  "MALI_POWER_MODE_DEEP_SLEEP",  atomic_read((atomic_t *)&bPoweroff) ? "already off" : "powering off"));
-       
-         if (atomic_read((atomic_t *)&bPoweroff) == 0)
-         { 
-            //trace_printk("[GPU power] MFG OFF\n");
-            if (clock_is_on(MT_CG_MFG_G3D))
-            {
-                       MALI_DEBUG_PRINT(3,("MFG disable_clock \n"));
-                disable_clock(MT_CG_MFG_G3D, "MFG");
-                disable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
-                               if(_need_univpll) {
-                                       disable_pll(UNIVPLL, "GPU"); 
-                               }
-            }
+               }
+               break;
+       case MALI_POWER_MODE_LIGHT_SLEEP:
+       case MALI_POWER_MODE_DEEP_SLEEP:
+               MALI_DEBUG_PRINT(3,
+                                ("Mali platform: Got %s event, %s\n",
+                                 power_mode ==
+                                 MALI_POWER_MODE_LIGHT_SLEEP ?
+                                 "MALI_POWER_MODE_LIGHT_SLEEP" :
+                                 "MALI_POWER_MODE_DEEP_SLEEP",
+                                 atomic_read((atomic_t *)&bPoweroff) ?
+                                 "already off" : "powering off"));
 
+               if (atomic_read((atomic_t *)&bPoweroff) == 0) {
+                       #ifndef CONFIG_MALI_DT
+                       if (clock_is_on(MT_CG_MFG_G3D)) {
+                               MALI_DEBUG_PRINT(3, ("MFG disable_clock\n"));
+                               disable_clock(MT_CG_MFG_G3D, "MFG");
+                               disable_clock(MT_CG_DISP0_SMI_COMMON, "MFG");
+                               if (_need_univpll) {
+                                       disable_pll(UNIVPLL, "GPU");
+                               }
+                               atomic_set((atomic_t *)&bPoweroff, 1);
+                       }
+                       #else
+                       if (!mali_clk_disable(device))
+                               atomic_set((atomic_t *)&bPoweroff, 1);
+                       #endif
 #if defined(CONFIG_MALI400_PROFILING)
-            _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
-                  MALI_PROFILING_EVENT_CHANNEL_GPU |
-                  MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, 0, 0, 0, 0, 0);
+                       _mali_osk_profiling_add_event
+                           (MALI_PROFILING_EVENT_TYPE_SINGLE |
+                            MALI_PROFILING_EVENT_CHANNEL_GPU |
+                            MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+                            0, 0, 0, 0, 0);
 #endif
-            atomic_set((atomic_t *)&bPoweroff, 1);
-         }
-
-         mfg_pwr_unlock(flags);
-         break;
-   }
+               }
+               break;
+       }
 }
index edd4b199f180ade7e7e179812e00e93c170c34e6..f627e3e27b97329621c76cdfe382d5b992e218a9 100644 (file)
@@ -1,3 +1,4 @@
+
 #ifndef __PLATFORM_PMM_H__
 #define __PLATFORM_PMM_H__
 
@@ -11,20 +12,26 @@ typedef enum mali_power_mode
     //MALI_POWER_MODE_NUM
 } mali_power_mode;
 
+#ifdef CONFIG_MALI_DT
+
+int mali_clk_enable(struct device *device);
+int mali_clk_disable(struct device *device);
+
+#endif
+
 /** @brief Platform power management initialisation of MALI
  *
  * This is called from the entrypoint of the driver to initialize the platform
  *
  */
-void mali_pmm_init(void);
+int mali_pmm_init(struct platform_device *device);
 
 /** @brief Platform power management deinitialisation of MALI
  *
  * This is called on the exit of the driver to terminate the platform
  *
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
  */
-void mali_pmm_deinit(void);
+void mali_pmm_deinit(struct platform_device *device);
 
 /** @brief Platform power management mode change of MALI
  *
@@ -38,13 +45,18 @@ void mali_pmm_tri_mode(mali_power_mode mode);
  * When GPU utilization handler is enabled, this function will be
  * periodically called.
  *
- * @param utilization The Mali GPU's work loading from 0 ~ 256. 0 = no utilization, 256 = full utilization.
+ * @param data The Mali GPU's workload from 0 ~ 256.
+ * 0 = no utilization, 256 = full utilization.
  */
 void mali_pmm_utilization_handler(struct mali_gpu_utilization_data *data);
 
-unsigned long gpu_get_current_utilization(void);
+unsigned int gpu_get_current_utilization(void);
+
+void mali_platform_power_mode_change(struct device *device,
+                                    mali_power_mode power_mode);
+bool mtk_mfg_is_ready(void);
+void dump_clk_state(void);
 
-void mali_platform_power_mode_change(mali_power_mode power_mode);
 
 #endif
 
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/mali/readme.txt b/drivers/misc/mediatek/gpu/mt8127/mali/mali/readme.txt
deleted file mode 100755 (executable)
index 2609506..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Building the Mali Device Driver for Linux
------------------------------------------
-
-Build the Mali Device Driver for Linux by running the following make command:
-
-KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
-
-where
-    kdir_path: Path to your Linux Kernel directory
-    ump_option: 1 = Enable UMP support(*)
-                0 = disable UMP support
-    build_option: debug = debug build of driver
-                  release = release build of driver
-
-(*)  For newer Linux Kernels, the Module.symvers file for the UMP device driver
-     must be available. The UMP_SYMVERS_FILE variable in the Makefile should
-     point to this file. This file is generated when the UMP driver is built.
-
-The result will be a mali.ko file, which can be loaded into the Linux kernel
-by using the insmod command.
-
-The kernel needs to be provided with a platform_device struct for the Mali GPU
-device. See the mali_utgard.h header file for how to set up the Mali GPU
-resources.
index 1aa67e74ba805d21b3c6039dd5836dca37fde7ea..9c71e6f25224560743c5caad2b689cdf5b4a5ab4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -25,12 +25,11 @@ enum mali200_mgmt_reg {
        MALI200_REG_ADDR_MGMT_INT_MASK                             = 0x1028,
        MALI200_REG_ADDR_MGMT_INT_STATUS                           = 0x102c,
 
-       MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW                   = 0x1044,
-
        MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS                     = 0x1050,
 
        MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE                    = 0x1080,
        MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC                       = 0x1084,
+       MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT                     = 0x1088,
        MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE                     = 0x108c,
 
        MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE                    = 0x10a0,
@@ -47,58 +46,58 @@ enum mali200_mgmt_reg {
 #define MALI200_REG_VAL_PERF_CNT_ENABLE 1
 
 enum mali200_mgmt_ctrl_mgmt {
-       MALI200_REG_VAL_CTRL_MGMT_STOP_BUS         = (1<<0),
-       MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES     = (1<<3),
-       MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET      = (1<<5),
-       MALI200_REG_VAL_CTRL_MGMT_START_RENDERING  = (1<<6),
-       MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET     = (1<<7), /* Only valid for Mali-300 and later */
+       MALI200_REG_VAL_CTRL_MGMT_STOP_BUS         = (1 << 0),
+       MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES     = (1 << 3),
+       MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET      = (1 << 5),
+       MALI200_REG_VAL_CTRL_MGMT_START_RENDERING  = (1 << 6),
+       MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET     = (1 << 7), /* Only valid for Mali-300 and later */
 };
 
 enum mali200_mgmt_irq {
-       MALI200_REG_VAL_IRQ_END_OF_FRAME          = (1<<0),
-       MALI200_REG_VAL_IRQ_END_OF_TILE           = (1<<1),
-       MALI200_REG_VAL_IRQ_HANG                  = (1<<2),
-       MALI200_REG_VAL_IRQ_FORCE_HANG            = (1<<3),
-       MALI200_REG_VAL_IRQ_BUS_ERROR             = (1<<4),
-       MALI200_REG_VAL_IRQ_BUS_STOP              = (1<<5),
-       MALI200_REG_VAL_IRQ_CNT_0_LIMIT           = (1<<6),
-       MALI200_REG_VAL_IRQ_CNT_1_LIMIT           = (1<<7),
-       MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR  = (1<<8),
-       MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
-       MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW  = (1<<10),
-       MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW   = (1<<11),
-       MALI400PP_REG_VAL_IRQ_RESET_COMPLETED       = (1<<12),
+       MALI200_REG_VAL_IRQ_END_OF_FRAME          = (1 << 0),
+       MALI200_REG_VAL_IRQ_END_OF_TILE           = (1 << 1),
+       MALI200_REG_VAL_IRQ_HANG                  = (1 << 2),
+       MALI200_REG_VAL_IRQ_FORCE_HANG            = (1 << 3),
+       MALI200_REG_VAL_IRQ_BUS_ERROR             = (1 << 4),
+       MALI200_REG_VAL_IRQ_BUS_STOP              = (1 << 5),
+       MALI200_REG_VAL_IRQ_CNT_0_LIMIT           = (1 << 6),
+       MALI200_REG_VAL_IRQ_CNT_1_LIMIT           = (1 << 7),
+       MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR  = (1 << 8),
+       MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW  = (1 << 10),
+       MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW   = (1 << 11),
+       MALI400PP_REG_VAL_IRQ_RESET_COMPLETED       = (1 << 12),
 };
 
 #define MALI200_REG_VAL_IRQ_MASK_ALL  ((enum mali200_mgmt_irq) (\
-    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
-    MALI200_REG_VAL_IRQ_END_OF_TILE                            |\
-    MALI200_REG_VAL_IRQ_HANG                                   |\
-    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
-    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
-    MALI200_REG_VAL_IRQ_BUS_STOP                               |\
-    MALI200_REG_VAL_IRQ_CNT_0_LIMIT                            |\
-    MALI200_REG_VAL_IRQ_CNT_1_LIMIT                            |\
-    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
-    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
-    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
-    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW                    |\
-    MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+                                      MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+                                      MALI200_REG_VAL_IRQ_END_OF_TILE                            |\
+                                      MALI200_REG_VAL_IRQ_HANG                                   |\
+                                      MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+                                      MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+                                      MALI200_REG_VAL_IRQ_BUS_STOP                               |\
+                                      MALI200_REG_VAL_IRQ_CNT_0_LIMIT                            |\
+                                      MALI200_REG_VAL_IRQ_CNT_1_LIMIT                            |\
+                                      MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+                                      MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW                    |\
+                                      MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
 
 #define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
-    MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
-    MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
-    MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
-    MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
-    MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
-    MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
-    MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+                                      MALI200_REG_VAL_IRQ_END_OF_FRAME                           |\
+                                      MALI200_REG_VAL_IRQ_FORCE_HANG                             |\
+                                      MALI200_REG_VAL_IRQ_BUS_ERROR                              |\
+                                      MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR                   |\
+                                      MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND                  |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW                   |\
+                                      MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
 
 #define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
 
 enum mali200_mgmt_status {
-       MALI200_REG_VAL_STATUS_RENDERING_ACTIVE     = (1<<0),
-       MALI200_REG_VAL_STATUS_BUS_STOPPED          = (1<<4),
+       MALI200_REG_VAL_STATUS_RENDERING_ACTIVE     = (1 << 0),
+       MALI200_REG_VAL_STATUS_BUS_STOPPED          = (1 << 4),
 };
 
 enum mali200_render_unit {
@@ -125,6 +124,8 @@ enum mali200_wb_unit_regs {
 #define MALI300_PP_PRODUCT_ID 0xCE07
 #define MALI400_PP_PRODUCT_ID 0xCD07
 #define MALI450_PP_PRODUCT_ID 0xCF07
+#define MALI470_PP_PRODUCT_ID 0xCF08
+
 
 
 #endif /* _MALI200_REGS_H_ */
index e865e13bbb8c9e586ee3b837b4d98bcc0040159d..779f7b84d33ea40fdd819437d7ee3491a9d9b0d1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -31,13 +31,13 @@ typedef enum {
        MALIGP2_REG_ADDR_MGMT_INT_CLEAR                 = 0x28,
        MALIGP2_REG_ADDR_MGMT_INT_MASK                  = 0x2C,
        MALIGP2_REG_ADDR_MGMT_INT_STAT                  = 0x30,
-       MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW           = 0x34,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE         = 0x3C,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE         = 0x40,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC            = 0x44,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC            = 0x48,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE          = 0x4C,
        MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE          = 0x50,
+       MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT          = 0x54,
        MALIGP2_REG_ADDR_MGMT_STATUS                    = 0x68,
        MALIGP2_REG_ADDR_MGMT_VERSION                   = 0x6C,
        MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ      = 0x80,
@@ -53,13 +53,13 @@ typedef enum {
  *  @see MALIGP2_CTRL_REG_CMD
  */
 typedef enum {
-       MALIGP2_REG_VAL_CMD_START_VS                    = (1<< 0),
-       MALIGP2_REG_VAL_CMD_START_PLBU                  = (1<< 1),
-       MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC   = (1<< 4),
-       MALIGP2_REG_VAL_CMD_RESET                               = (1<< 5),
-       MALIGP2_REG_VAL_CMD_FORCE_HANG                  = (1<< 6),
-       MALIGP2_REG_VAL_CMD_STOP_BUS                    = (1<< 9),
-       MALI400GP_REG_VAL_CMD_SOFT_RESET                = (1<<10), /* only valid for Mali-300 and later */
+       MALIGP2_REG_VAL_CMD_START_VS                    = (1 << 0),
+       MALIGP2_REG_VAL_CMD_START_PLBU                  = (1 << 1),
+       MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC   = (1 << 4),
+       MALIGP2_REG_VAL_CMD_RESET                               = (1 << 5),
+       MALIGP2_REG_VAL_CMD_FORCE_HANG                  = (1 << 6),
+       MALIGP2_REG_VAL_CMD_STOP_BUS                    = (1 << 9),
+       MALI400GP_REG_VAL_CMD_SOFT_RESET                = (1 << 10), /* only valid for Mali-300 and later */
 } mgp_contr_reg_val_cmd;
 
 
@@ -92,41 +92,41 @@ typedef enum {
 /* Mask defining all IRQs in Mali GP */
 #define MALIGP2_REG_VAL_IRQ_MASK_ALL \
        (\
-               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
-               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
-               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
-               MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          | \
-               MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        | \
-               MALIGP2_REG_VAL_IRQ_HANG                | \
-               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
-               MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    | \
-               MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    | \
-               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
-               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
-               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
-               MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     | \
-               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
-               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
-               MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     | \
-               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
-               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
-               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+        MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+        MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+        MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+        MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ          | \
+        MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ        | \
+        MALIGP2_REG_VAL_IRQ_HANG                | \
+        MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+        MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT    | \
+        MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT    | \
+        MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+        MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+        MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+        MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED     | \
+        MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+        MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+        MALI400GP_REG_VAL_IRQ_RESET_COMPLETED     | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+        MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
 
 /* Mask defining the IRQs in Mali GP which we use */
 #define MALIGP2_REG_VAL_IRQ_MASK_USED \
        (\
-               MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
-               MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
-               MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
-               MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
-               MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
-               MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
-               MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
-               MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
-               MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
-               MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
-               MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
-               MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+        MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST      | \
+        MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST    | \
+        MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM     | \
+        MALIGP2_REG_VAL_IRQ_FORCE_HANG          | \
+        MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR     | \
+        MALIGP2_REG_VAL_IRQ_SYNC_ERROR          | \
+        MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR       | \
+        MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD      | \
+        MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD     | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+        MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW  | \
+        MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
 
 /* Mask defining non IRQs on MaliGP2*/
 #define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
@@ -146,13 +146,13 @@ typedef enum {
 /** }@ defgroup MALIGP2_STATUS*/
 
 #define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
-       MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
-       MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+               MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+               MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
 
 
 #define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
-       MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
-       MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+               MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+               MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
 
 /* This should be in the top 16 bit of the version register of gp.*/
 #define MALI200_GP_PRODUCT_ID 0xA07
index dc44c4b16b03b5a4bce05633dc1f4611c0028c01..9e98885086486f669f767049ea7da01309f53ae7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2010-2011, 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 0b53257210aee9fa4432107731afc048afd6a7c5..e6aa3008493562899c90ea5c56e85d408f03302e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2010-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -20,17 +20,17 @@ MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
         */
 
        u32 mask = (1 << 0) | /* enable all three counters */
-                  (0 << 1) | /* reset both Count Registers to 0x0 */
-                  (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
-                  (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
-                  (0 << 4) | /* Count Register 0 interrupt enable */
-                  (0 << 5) | /* Count Register 1 interrupt enable */
-                  (0 << 6) | /* Cycle Counter interrupt enable */
-                  (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
-                  (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
-                  (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
-
-       __asm__ __volatile__ ("MCR    p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+                  (0 << 1) | /* reset both Count Registers to 0x0 */
+                  (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+                  (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+                  (0 << 4) | /* Count Register 0 interrupt enable */
+                  (0 << 5) | /* Count Register 1 interrupt enable */
+                  (0 << 6) | /* Cycle Counter interrupt enable */
+                  (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+                  (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+                  (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+       __asm__ __volatile__("MCR    p15, 0, %0, c15, c12, 0" : : "r"(mask));
 
        return _MALI_OSK_ERR_OK;
 }
@@ -40,7 +40,7 @@ MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
        u32 result;
 
        /* this is for the clock cycles */
-       __asm__ __volatile__ ("MRC    p15, 0, %0, c15, c12, 1" : "=r" (result));
+       __asm__ __volatile__("MRC    p15, 0, %0, c15, c12, 1" : "=r"(result));
 
        return (u64)result;
 }
index dc44c4b16b03b5a4bce05633dc1f4611c0028c01..9e98885086486f669f767049ea7da01309f53ae7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2010-2011, 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index a059ebb77830301e9197de32e00067ebae6add61..fcd4e8474c84bb06a76090e09fd6af31b273a75f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2010-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -20,7 +20,7 @@ MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
 
 MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
 {
-       return _mali_osk_time_get_ns();
+       return _mali_osk_boot_time_get_ns();
 }
 
 #endif /* __MALI_TIMESTAMP_H__ */
old mode 100755 (executable)
new mode 100644 (file)
index 644bfb9..36df19f
@@ -10,7 +10,7 @@
 
 # Set default configuration to use, if Makefile didn't provide one.
 # Change this to use a different config.h
-CONFIG ?= os_memory_64m
+CONFIG ?= default
 
 # Validate selected config
 ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f  $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
@@ -84,6 +84,7 @@ ump-y = common/ump_kernel_common.o \
        linux/ump_osk_atomics.o \
        linux/ump_osk_low_level_mem.o \
        linux/ump_osk_misc.o \
+       linux/ump_kernel_random_mapping.o \
        $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \
        $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \
        $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \
old mode 100755 (executable)
new mode 100644 (file)
index 86b9711..ff2efd6
@@ -1,7 +1,7 @@
 #
 # This confidential and proprietary software may be used only as
 # authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2008-2012 ARM Limited
+# (C) COPYRIGHT 2008-2012, 2014-2015 ARM Limited
 # ALL RIGHTS RESERVED
 # The entire notice above must be reproduced on all authorised
 # copies and copies may only be made to the extent permitted
@@ -23,7 +23,7 @@ check_cc2 = \
 
 # Check that required parameters are supplied.
 ifeq ($(CONFIG),)
-$(error "CONFIG must be specified.")
+CONFIG := default
 endif
 ifeq ($(CPU)$(KDIR),)
 $(error "KDIR or CPU must be specified.")
old mode 100755 (executable)
new mode 100644 (file)
index 8e62aa0..8e3f699
@@ -1,7 +1,7 @@
 #
 # This confidential and proprietary software may be used only as
 # authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+# (C) COPYRIGHT 2008-2011, 2013, 2015 ARM Limited
 # ALL RIGHTS RESERVED
 # The entire notice above must be reproduced on all authorised
 # copies and copies may only be made to the extent permitted
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/arch-default/config.h b/drivers/misc/mediatek/gpu/mt8127/mali/ump/arch-default/config.h
new file mode 100644 (file)
index 0000000..f2ff7df
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2012, 2014-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Use OS memory. */
+#define ARCH_UMP_BACKEND_DEFAULT          1
+
+/* OS memory won't need a base address. */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT   0x00000000
+
+/* 512 MB maximum limit for UMP allocations. */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT (512UL * 1024UL * 1024UL)
+
+
+#endif /* __ARCH_CONFIG_H__ */
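
How these defaults are consumed is outside this excerpt; as a hedged sketch based on the stock UMP backend selector (treat the function names as assumptions here), a value of 1 picks the OS-memory backend and 0 the dedicated block allocator:

ump_memory_backend *example_backend_create(void)
{
	if (1 == ARCH_UMP_BACKEND_DEFAULT)
		/* OS memory: no fixed base address needed */
		return ump_os_memory_backend_create(ARCH_UMP_MEMORY_SIZE_DEFAULT);

	/* dedicated memory: carve out a fixed physical region */
	return ump_block_allocator_create(ARCH_UMP_MEMORY_ADDRESS_DEFAULT,
					  ARCH_UMP_MEMORY_SIZE_DEFAULT);
}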
index db140365a0a7144792f25aa2fb6a30b277939238..4b7162e829afb3f70b71bac9d831e65c9877af6e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index e77c822fb9fd49dcd37f90758b07e98f9d5920a4..f8c28d353f7e1bd8c75905c6c4f3df791d9f711e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -14,6 +14,7 @@
 #include "ump_uk_types.h"
 #include "ump_kernel_interface.h"
 #include "ump_kernel_common.h"
+#include "ump_kernel_random_mapping.h"
 
 
 
@@ -23,7 +24,7 @@
 
 UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
 {
-       ump_dd_mem * mem = (ump_dd_mem *)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
@@ -36,20 +37,16 @@ UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
 
 UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
 {
-       ump_dd_mem * mem;
-
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
+       ump_dd_mem *mem;
 
        DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
-       if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem)) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
+       mem = ump_random_mapping_get(device.secure_id_map, (int)secure_id);
+       if (NULL == mem) {
                DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
                return UMP_DD_HANDLE_INVALID;
        }
 
-       ump_dd_reference_add(mem);
-
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
+       /* Keep the reference taken in ump_random_mapping_get() */
 
        return (ump_dd_handle)mem;
 }
@@ -58,7 +55,7 @@ UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secu
 
 UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
 {
-       ump_dd_mem * mem = (ump_dd_mem*) memh;
+       ump_dd_mem *mem = (ump_dd_mem *) memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
@@ -67,9 +64,9 @@ UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle me
 
 
 
-UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block *blocks, unsigned long num_blocks)
 {
-       ump_dd_mem * mem = (ump_dd_mem *)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
@@ -92,9 +89,9 @@ UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle me
 
 
 
-UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block *block)
 {
-       ump_dd_mem * mem = (ump_dd_mem *)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
@@ -119,7 +116,7 @@ UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem
 
 UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
 {
-       ump_dd_mem * mem = (ump_dd_mem*)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
@@ -132,7 +129,7 @@ UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
 
 UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
 {
-       ump_dd_mem * mem = (ump_dd_mem*)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
        int new_ref;
 
        DEBUG_ASSERT_POINTER(mem);
@@ -146,32 +143,11 @@ UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
 
 UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
 {
-       int new_ref;
-       ump_dd_mem * mem = (ump_dd_mem*)memh;
+       ump_dd_mem *mem = (ump_dd_mem *)memh;
 
        DEBUG_ASSERT_POINTER(mem);
 
-       /* We must hold this mutex while doing the atomic_dec_and_read, to protect
-       that elements in the ump_descriptor_mapping table is always valid.  If they
-       are not, userspace may accidently map in this secure_ids right before its freed
-       giving a mapped backdoor into unallocated memory.*/
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-
-       new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
-
-       DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
-
-       if (0 == new_ref) {
-               DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
-
-               ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
-
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               mem->release_func(mem->ctx, mem);
-               _mali_osk_free(mem);
-       } else {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-       }
+       ump_random_mapping_put(mem);
 }
 
 
@@ -179,26 +155,24 @@ UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
 /* --------------- Handling of user space requests follows --------------- */
 
 
-_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args)
 {
-       ump_session_data * session_data;
+       ump_session_data *session_data;
 
-       DEBUG_ASSERT_POINTER( args );
-       DEBUG_ASSERT_POINTER( args->ctx );
+       DEBUG_ASSERT_POINTER(args);
+       DEBUG_ASSERT_POINTER(args->ctx);
 
        session_data = (ump_session_data *)args->ctx;
 
        /* check compatibility */
        if (args->version == UMP_IOCTL_API_VERSION) {
-               DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
-               args->compatible = 1;
-               session_data->api_version = args->version;
-       } else if (args->version == MAKE_VERSION_ID(1)) {
-               DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+               DBG_MSG(3, ("API version set to newest %d (compatible)\n",
+                           GET_VERSION(args->version)));
                args->compatible = 1;
                session_data->api_version = args->version;
        } else {
-               DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+               DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
+                           GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
                args->compatible = 0;
                args->version = UMP_IOCTL_API_VERSION; /* report our version */
        }
@@ -207,19 +181,19 @@ _mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
 }
 
 
-_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info)
 {
-       ump_session_memory_list_element * session_memory_element;
-       ump_session_memory_list_element * tmp;
-       ump_session_data * session_data;
+       ump_session_memory_list_element *session_memory_element;
+       ump_session_memory_list_element *tmp;
+       ump_session_data *session_data;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
        int secure_id;
 
-       DEBUG_ASSERT_POINTER( release_info );
-       DEBUG_ASSERT_POINTER( release_info->ctx );
+       DEBUG_ASSERT_POINTER(release_info);
+       DEBUG_ASSERT_POINTER(release_info->ctx);
 
        /* Retrieve the session data */
-       session_data = (ump_session_data*)release_info->ctx;
+       session_data = (ump_session_data *)release_info->ctx;
 
        /* If there are many items in the memory session list we
         * could be de-referencing this pointer a lot so keep a local copy
@@ -231,7 +205,7 @@ _mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
        /* Iterate through the memory list looking for the requested secure ID */
        _mali_osk_mutex_wait(session_data->lock);
        _MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
-               if ( session_memory_element->mem->secure_id == secure_id) {
+               if (session_memory_element->mem->secure_id == secure_id) {
                        ump_dd_mem *release_mem;
 
                        release_mem = session_memory_element->mem;
@@ -251,124 +225,122 @@ _mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
        return ret;
 }
 
-_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction)
 {
-       ump_dd_mem * mem;
+       ump_dd_mem *mem;
        _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
 
-       DEBUG_ASSERT_POINTER( user_interaction );
+       DEBUG_ASSERT_POINTER(user_interaction);
 
        /* We take a reference during the lookup so the memory can't be removed while we are using it */
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem)) {
+       mem = ump_random_mapping_get(device.secure_id_map, user_interaction->secure_id);
+       if (NULL != mem) {
                user_interaction->size = mem->size_bytes;
-               DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+               DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
+                           (ump_secure_id)user_interaction->secure_id,
+                           (unsigned long)user_interaction->size));
+               ump_random_mapping_put(mem);
                ret = _MALI_OSK_ERR_OK;
        } else {
                user_interaction->size = 0;
-               DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+               DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
+                           (ump_secure_id)user_interaction->secure_id));
        }
 
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
        return ret;
 }
 
 
 
-void _ump_ukk_msync( _ump_uk_msync_s *args )
+void _ump_ukk_msync(_ump_uk_msync_s *args)
 {
-       ump_dd_mem * mem = NULL;
+       ump_dd_mem *mem = NULL;
        void *virtual = NULL;
        u32 size = 0;
        u32 offset = 0;
 
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
-
+       mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
+                           (ump_secure_id)args->secure_id));
                return;
        }
-       /* Ensure the memory doesn't dissapear when we are flushing it. */
-       ump_dd_reference_add(mem);
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
 
        /* Returns the cache settings back to Userspace */
-       args->is_cached=mem->is_cached;
+       args->is_cached = mem->is_cached;
 
        /* If this flag is the only one set, we should not do the actual flush, only the readout */
-       if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op ) {
+       if (_UMP_UK_MSYNC_READOUT_CACHE_ENABLED == args->op) {
                DBG_MSG(3, ("_ump_ukk_msync READOUT  ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
                goto msync_release_and_return;
        }
 
        /* Nothing to do if the memory is not cached */
-       if ( 0==mem->is_cached ) {
+       if (0 == mem->is_cached) {
                DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d  OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
                goto msync_release_and_return;
        }
        DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync  Flush  OP: %d Address: 0x%08x Mapping: 0x%08x\n",
-                   (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
+                   (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
 
-       if ( args->address ) {
+       if (args->address) {
                virtual = (void *)((u32)args->address);
                offset = (u32)((args->address) - (args->mapping));
        } else {
                /* Flush entire mapping when no address is specified. */
                virtual = args->mapping;
        }
-       if ( args->size ) {
+       if (args->size) {
                size = args->size;
        } else {
                /* Flush entire mapping when no size is specified. */
                size = mem->size_bytes - offset;
        }
 
-       if ( (offset + size) > mem->size_bytes ) {
+       if ((offset + size) > mem->size_bytes) {
                DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
                goto msync_release_and_return;
        }
 
        /* The actual cache flush - Implemented for each OS */
-       _ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
+       _ump_osk_msync(mem, virtual, offset, size, args->op, NULL);
 
 msync_release_and_return:
-       ump_dd_reference_release(mem);
+       ump_random_mapping_put(mem);
        return;
 }
 
-void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args)
 {
-       ump_session_data * session_data;
+       ump_session_data *session_data;
        ump_uk_cache_op_control op;
 
-       DEBUG_ASSERT_POINTER( args );
-       DEBUG_ASSERT_POINTER( args->ctx );
+       DEBUG_ASSERT_POINTER(args);
+       DEBUG_ASSERT_POINTER(args->ctx);
 
        op = args->op;
        session_data = (ump_session_data *)args->ctx;
 
        _mali_osk_mutex_wait(session_data->lock);
-       if ( op== _UMP_UK_CACHE_OP_START ) {
+       if (op == _UMP_UK_CACHE_OP_START) {
                session_data->cache_operations_ongoing++;
-               DBG_MSG(4, ("Cache ops start\n" ));
-               if ( session_data->cache_operations_ongoing != 1 ) {
-                       DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
+               DBG_MSG(4, ("Cache ops start\n"));
+               if (session_data->cache_operations_ongoing != 1) {
+                       DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing));
                }
-       } else if ( op== _UMP_UK_CACHE_OP_FINISH ) {
+       } else if (op == _UMP_UK_CACHE_OP_FINISH) {
                DBG_MSG(4, ("Cache ops finish\n"));
                session_data->cache_operations_ongoing--;
 #if 0
-               if ( session_data->has_pending_level1_cache_flush) {
+               if (session_data->has_pending_level1_cache_flush) {
                        /* This function will set has_pending_level1_cache_flush=0 */
-                       _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+                       _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
                }
 #endif
 
                /* to be on the safe side: always flush l1 cache when cache operations are done */
-               _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
-               DBG_MSG(4, ("Cache ops finish end\n" ));
+               _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+               DBG_MSG(4, ("Cache ops finish end\n"));
        } else {
                DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
        }
@@ -376,117 +348,108 @@ void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
 
 }
 
-void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
 {
-       ump_dd_mem * mem = NULL;
+       ump_dd_mem *mem = NULL;
        ump_uk_user old_user;
        ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
        ump_session_data *session_data;
 
-       DEBUG_ASSERT_POINTER( args );
-       DEBUG_ASSERT_POINTER( args->ctx );
+       DEBUG_ASSERT_POINTER(args);
+       DEBUG_ASSERT_POINTER(args->ctx);
 
        session_data = (ump_session_data *)args->ctx;
 
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
-
+       mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
+                           (ump_secure_id)args->secure_id));
                return;
        }
 
        old_user = mem->hw_device;
        mem->hw_device = args->new_user;
 
-       DBG_MSG(3, ("UMP[%02u] Switch usage  Start  New: %s  Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));
+       DBG_MSG(3, ("UMP[%02u] Switch usage  Start  New: %s  Prev: %s.\n",
+                   (ump_secure_id)args->secure_id,
+                   args->new_user ? "MALI" : "CPU",
+                   old_user ? "MALI" : "CPU"));
 
-       if ( ! mem->is_cached ) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
-               return;
+       if (!mem->is_cached) {
+               DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
+                           (ump_secure_id)args->secure_id));
+               goto out;
        }
 
-       if ( old_user == args->new_user) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
-               return;
+       if (old_user == args->new_user) {
+               DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
+                           (ump_secure_id)args->secure_id));
+               goto out;
        }
        if (
-           /* Previous AND new is both different from CPU */
-           (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU  )
+               /* Previous AND new are both different from CPU */
+               (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
        ) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
-               return;
+               DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
+                           (ump_secure_id)args->secure_id));
+               goto out;
        }
 
-       if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) ) {
-               cache_op =_UMP_UK_MSYNC_INVALIDATE;
+       if ((old_user != _UMP_UK_USED_BY_CPU) && (args->new_user == _UMP_UK_USED_BY_CPU)) {
+               cache_op = _UMP_UK_MSYNC_INVALIDATE;
                DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
 #ifdef UMP_SKIP_INVALIDATION
 #error
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
                DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
-               return;
+               goto out;
 #endif
        }
-       /* Ensure the memory doesn't dissapear when we are flushing it. */
-       ump_dd_reference_add(mem);
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
 
        /* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
        _mali_osk_mutex_wait(session_data->lock);
        /* Actual cache flush */
-       _ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
+       _ump_osk_msync(mem, NULL, 0, mem->size_bytes, cache_op, session_data);
        _mali_osk_mutex_signal(session_data->lock);
 
-       ump_dd_reference_release(mem);
+out:
+       ump_random_mapping_put(mem);
        DBG_MSG(4, ("UMP[%02u] Switch usage  Finish\n", (ump_secure_id)args->secure_id));
        return;
 }
 
-void _ump_ukk_lock(_ump_uk_lock_s *args )
+void _ump_ukk_lock(_ump_uk_lock_s *args)
 {
-       ump_dd_mem * mem = NULL;
-
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+       ump_dd_mem *mem = NULL;
 
+       mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
+               DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
+                           (ump_secure_id)args->secure_id));
                return;
        }
-       ump_dd_reference_add(mem);
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
 
-       DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
+       DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage));
 
        mem->lock_usage = (ump_lock_usage) args->lock_usage;
 
-       ump_dd_reference_release(mem);
+       ump_random_mapping_put(mem);
 }
 
-void _ump_ukk_unlock(_ump_uk_unlock_s *args )
+void _ump_ukk_unlock(_ump_uk_unlock_s *args)
 {
-       ump_dd_mem * mem = NULL;
-
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+       ump_dd_mem *mem = NULL;
 
+       mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
        if (NULL == mem) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
+               DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
+                           (ump_secure_id)args->secure_id));
                return;
        }
-       ump_dd_reference_add(mem);
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
 
-       DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n", (u32)args->secure_id, (u32) mem->lock_usage ));
+       DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n",
+                   (u32)args->secure_id, (u32) mem->lock_usage));
 
        mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
 
-       ump_dd_reference_release(mem);
+       ump_random_mapping_put(mem);
 }
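
The recurring change in this file: ump_random_mapping_get() now takes the reference that callers previously managed by hand under device.secure_id_map_lock, so every successful lookup is simply paired with ump_random_mapping_put(). A minimal sketch of the new pattern (the wrapper function itself is hypothetical; the calls match the hunks above):

static void example_lookup(ump_secure_id secure_id)
{
	ump_dd_mem *mem;

	mem = ump_random_mapping_get(device.secure_id_map, (int)secure_id);
	if (NULL == mem)
		return; /* unknown secure ID */

	/* ... mem is safe to use here; the reference keeps it alive ... */

	ump_random_mapping_put(mem); /* drop the reference taken by _get() */
}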
index d57920c9a91b90e99eafaa44b68c76b0485d8f53..480d3ab4a22cbe9136c2c5536b1b00fe371f5a86 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -45,24 +45,17 @@ _mali_osk_errcode_t ump_kernel_constructor(void)
 
        /* Perform OS Specific initialization */
        err = _ump_osk_init();
-       if( _MALI_OSK_ERR_OK != err ) {
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("Failed to initiaze the UMP Device Driver"));
                return err;
        }
 
        /* Init the global device */
-       _mali_osk_memset(&device, 0, sizeof(device) );
+       _mali_osk_memset(&device, 0, sizeof(device));
 
        /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
-       device.secure_id_map_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
-       if (NULL == device.secure_id_map_lock) {
-               MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
-               return _MALI_OSK_ERR_NOMEM;
-       }
-
-       device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+       device.secure_id_map = ump_random_mapping_create();
        if (NULL == device.secure_id_map) {
-               _mali_osk_mutex_term(device.secure_id_map_lock);
                MSG_ERR(("Failed to create secure id lookup table\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
@@ -71,8 +64,7 @@ _mali_osk_errcode_t ump_kernel_constructor(void)
        device.backend = ump_memory_backend_create();
        if (NULL == device.backend) {
                MSG_ERR(("Failed to create memory backend\n"));
-               _mali_osk_mutex_term(device.secure_id_map_lock);
-               ump_descriptor_mapping_destroy(device.secure_id_map);
+               ump_random_mapping_destroy(device.secure_id_map);
                return _MALI_OSK_ERR_NOMEM;
        }
 
@@ -82,12 +74,8 @@ _mali_osk_errcode_t ump_kernel_constructor(void)
 void ump_kernel_destructor(void)
 {
        DEBUG_ASSERT_POINTER(device.secure_id_map);
-       DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
-
-       _mali_osk_mutex_term(device.secure_id_map_lock);
-       device.secure_id_map_lock = NULL;
 
-       ump_descriptor_mapping_destroy(device.secure_id_map);
+       ump_random_mapping_destroy(device.secure_id_map);
        device.secure_id_map = NULL;
 
        device.backend->shutdown(device.backend);
@@ -100,9 +88,9 @@ void ump_kernel_destructor(void)
 
 /** Creates a new UMP session
  */
-_mali_osk_errcode_t _ump_ukk_open( void** context )
+_mali_osk_errcode_t _ump_ukk_open(void **context)
 {
-       struct ump_session_data * session_data;
+       struct ump_session_data *session_data;
 
        /* allocated struct to track this session */
        session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
@@ -112,19 +100,21 @@ _mali_osk_errcode_t _ump_ukk_open( void** context )
        }
 
        session_data->lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
-       if( NULL == session_data->lock ) {
+       if (NULL == session_data->lock) {
                MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
                _mali_osk_free(session_data);
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+       session_data->cookies_map = ump_descriptor_mapping_create(
+                                           UMP_COOKIES_PER_SESSION_INITIAL,
+                                           UMP_COOKIES_PER_SESSION_MAXIMUM);
 
-       if ( NULL == session_data->cookies_map ) {
+       if (NULL == session_data->cookies_map) {
                MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
 
                _mali_osk_mutex_term(session_data->lock);
-               _mali_osk_free( session_data );
+               _mali_osk_free(session_data);
                return _MALI_OSK_ERR_NOMEM;
        }
 
@@ -138,7 +128,7 @@ _mali_osk_errcode_t _ump_ukk_open( void** context )
           to the correct one.*/
        session_data->api_version = MAKE_VERSION_ID(1);
 
-       *context = (void*)session_data;
+       *context = (void *)session_data;
 
        session_data->cache_operations_ongoing = 0 ;
        session_data->has_pending_level1_cache_flush = 0;
@@ -148,11 +138,11 @@ _mali_osk_errcode_t _ump_ukk_open( void** context )
        return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _ump_ukk_close( void** context )
+_mali_osk_errcode_t _ump_ukk_close(void **context)
 {
-       struct ump_session_data * session_data;
-       ump_session_memory_list_element * item;
-       ump_session_memory_list_element * tmp;
+       struct ump_session_data *session_data;
+       ump_session_memory_list_element *item;
+       ump_session_memory_list_element *tmp;
 
        session_data = (struct ump_session_data *)*context;
        if (NULL == session_data) {
@@ -171,21 +161,21 @@ _mali_osk_errcode_t _ump_ukk_close( void** context )
                _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list) {
                        _ump_uk_unmap_mem_s unmap_args;
                        DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
-                                   descriptor->phys_addr, descriptor->size, descriptor->mapping));
-                       unmap_args.ctx = (void*)session_data;
+                                   descriptor->phys_addr, descriptor->size, descriptor->mapping));
+                       unmap_args.ctx = (void *)session_data;
                        unmap_args.mapping = descriptor->mapping;
                        unmap_args.size = descriptor->size;
                        unmap_args._ukk_private = NULL; /* NOTE: unused */
                        unmap_args.cookie = descriptor->cookie;
 
                        /* NOTE: This modifies the list_head_session_memory_mappings_list */
-                       _ump_ukk_unmap_mem( &unmap_args );
+                       _ump_ukk_unmap_mem(&unmap_args);
                }
        }
 
        /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
         * can fail silently. */
-       DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+       DEBUG_ASSERT(_mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list));
 
        _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
                _mali_osk_list_del(&item->list);
@@ -194,7 +184,7 @@ _mali_osk_errcode_t _ump_ukk_close( void** context )
                _mali_osk_free(item);
        }
 
-       ump_descriptor_mapping_destroy( session_data->cookies_map );
+       ump_descriptor_mapping_destroy(session_data->cookies_map);
 
        _mali_osk_mutex_term(session_data->lock);
        _mali_osk_free(session_data);
@@ -204,38 +194,38 @@ _mali_osk_errcode_t _ump_ukk_close( void** context )
        return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args)
 {
-       struct ump_session_data * session_data;
-       ump_memory_allocation * descriptor;  /* Describes current mapping of memory */
+       struct ump_session_data *session_data;
+       ump_memory_allocation *descriptor;   /* Describes current mapping of memory */
        _mali_osk_errcode_t err;
        unsigned long offset = 0;
        unsigned long left;
        ump_dd_handle handle;  /* The real UMP handle for this memory. Its real datatype is ump_dd_mem*  */
-       ump_dd_mem * mem;      /* The real UMP memory. It is equal to the handle, but with exposed struct */
+       ump_dd_mem *mem;       /* The real UMP memory. It is equal to the handle, but with exposed struct */
        u32 block;
        int map_id;
 
        session_data = (ump_session_data *)args->ctx;
-       if( NULL == session_data ) {
+       if (NULL == session_data) {
                MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
                return _MALI_OSK_ERR_INVALID_ARGS;
        }
 
-       descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+       descriptor = (ump_memory_allocation *) _mali_osk_calloc(1, sizeof(ump_memory_allocation));
        if (NULL == descriptor) {
                MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
 
        handle = ump_dd_handle_create_from_secure_id(args->secure_id);
-       if ( UMP_DD_HANDLE_INVALID == handle) {
+       if (UMP_DD_HANDLE_INVALID == handle) {
                _mali_osk_free(descriptor);
                DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
                return _MALI_OSK_ERR_FAULT;
        }
 
-       mem = (ump_dd_mem*)handle;
+       mem = (ump_dd_mem *)handle;
        DEBUG_ASSERT(mem);
        if (mem->size_bytes != args->size) {
                _mali_osk_free(descriptor);
@@ -244,7 +234,7 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
                return _MALI_OSK_ERR_FAULT;
        }
 
-       map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+       map_id = ump_descriptor_mapping_allocate_mapping(session_data->cookies_map, (void *) descriptor);
 
        if (map_id < 0) {
                _mali_osk_free(descriptor);
@@ -261,7 +251,7 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
        descriptor->ump_session = session_data;
        descriptor->cookie = (u32)map_id;
 
-       if ( mem->is_cached ) {
+       if (mem->is_cached) {
                descriptor->is_cached = 1;
                args->is_cached       = 1;
                DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
@@ -271,22 +261,22 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
                DBG_MSG(3, ("Mapping UMP secure_id: %d  as Uncached.\n", args->secure_id));
        }
 
-       _mali_osk_list_init( &descriptor->list );
+       _mali_osk_list_init(&descriptor->list);
 
-       err = _ump_osk_mem_mapregion_init( descriptor );
-       if( _MALI_OSK_ERR_OK != err ) {
+       err = _ump_osk_mem_mapregion_init(descriptor);
+       if (_MALI_OSK_ERR_OK != err) {
                DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
-               ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+               ump_descriptor_mapping_free(session_data->cookies_map, map_id);
                _mali_osk_free(descriptor);
                ump_dd_reference_release(mem);
                return err;
        }
 
        DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
-                   mem->secure_id,
-                   mem->size_bytes,
-                   ((NULL != mem->block_array) ? mem->block_array->addr : 0),
-                   mem->nr_blocks));
+                   mem->secure_id,
+                   mem->size_bytes,
+                   ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+                   mem->nr_blocks));
 
        left = descriptor->size;
        /* loop over all blocks and map them in */
@@ -299,11 +289,11 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
                        size_to_map = left;
                }
 
-               if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) ) {
+               if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map)) {
                        DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
-                       ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+                       ump_descriptor_mapping_free(session_data->cookies_map, map_id);
                        ump_dd_reference_release(mem);
-                       _ump_osk_mem_mapregion_term( descriptor );
+                       _ump_osk_mem_mapregion_term(descriptor);
                        _mali_osk_free(descriptor);
                        return _MALI_OSK_ERR_FAULT;
                }
@@ -313,7 +303,7 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
 
        /* Add to the ump_memory_allocation tracking list */
        _mali_osk_mutex_wait(session_data->lock);
-       _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+       _mali_osk_list_add(&descriptor->list, &session_data->list_head_session_memory_mappings_list);
        _mali_osk_mutex_signal(session_data->lock);
 
        args->mapping = descriptor->mapping;
@@ -322,48 +312,48 @@ _mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
        return _MALI_OSK_ERR_OK;
 }
 
-void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args)
 {
-       struct ump_session_data * session_data;
-       ump_memory_allocation * descriptor;
+       struct ump_session_data *session_data;
+       ump_memory_allocation *descriptor;
        ump_dd_handle handle;
 
        session_data = (ump_session_data *)args->ctx;
 
-       if( NULL == session_data ) {
+       if (NULL == session_data) {
                MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
                return;
        }
 
-       if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) ) {
-               MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+       if (0 != ump_descriptor_mapping_get(session_data->cookies_map, (int)args->cookie, (void **)&descriptor)) {
+               MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie));
                return;
        }
 
        DEBUG_ASSERT_POINTER(descriptor);
 
        handle = descriptor->handle;
-       if ( UMP_DD_HANDLE_INVALID == handle) {
+       if (UMP_DD_HANDLE_INVALID == handle) {
                DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
                return;
        }
 
        /* Remove the ump_memory_allocation from the list of tracked mappings */
        _mali_osk_mutex_wait(session_data->lock);
-       _mali_osk_list_del( &descriptor->list );
+       _mali_osk_list_del(&descriptor->list);
        _mali_osk_mutex_signal(session_data->lock);
 
-       ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+       ump_descriptor_mapping_free(session_data->cookies_map, (int)args->cookie);
 
        ump_dd_reference_release(handle);
 
-       _ump_osk_mem_mapregion_term( descriptor );
+       _ump_osk_mem_mapregion_term(descriptor);
        _mali_osk_free(descriptor);
 }
 
-u32 _ump_ukk_report_memory_usage( void )
+u32 _ump_ukk_report_memory_usage(void)
 {
-       if(device.backend->stat)
+       if (device.backend->stat)
                return device.backend->stat(device.backend);
        else
                return 0;
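
With the separate secure_id_map_lock gone, the constructor above unwinds only two resources; a simplified sketch of the resulting init/teardown ordering (names as used in this file, error messages trimmed):

_mali_osk_errcode_t example_constructor(void)
{
	device.secure_id_map = ump_random_mapping_create();
	if (NULL == device.secure_id_map)
		return _MALI_OSK_ERR_NOMEM;

	device.backend = ump_memory_backend_create();
	if (NULL == device.backend) {
		/* unwind in reverse creation order */
		ump_random_mapping_destroy(device.secure_id_map);
		return _MALI_OSK_ERR_NOMEM;
	}

	return _MALI_OSK_ERR_OK;
}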
index 59aee9ee1e102a4d3542d3315bc9e0f407213ac5..4e6e7013ce974a93d863d5978e336312fc426b4b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -14,6 +14,7 @@
 #include "ump_kernel_types.h"
 #include "ump_kernel_interface.h"
 #include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_random_mapping.h"
 #include "ump_kernel_memory_backend.h"
 
 
@@ -25,19 +26,19 @@ extern int ump_debug_level;
                ((level) <=  ump_debug_level)?\
                UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
                UMP_DEBUG_PRINT(args):0; \
-               } while (0)
+       } while (0)
 
 #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
-               if((condition)&&((level) <=  ump_debug_level)) {\
+       if((condition)&&((level) <=  ump_debug_level)) {\
                UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
                UMP_DEBUG_PRINT(args); \
-               }
+       }
 
 #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
-               else if((level) <=  ump_debug_level) { \
+       else if((level) <=  ump_debug_level) { \
                UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
                UMP_DEBUG_PRINT(args); \
-               }
+       }
 
 #define DEBUG_ASSERT_POINTER(pointer) do  {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
 #define DEBUG_ASSERT(condition) do  {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
@@ -52,16 +53,16 @@ extern int ump_debug_level;
 #endif /* DEBUG */
 
 #define MSG_ERR(args) do{ /* args should be in brackets */ \
-        _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
-        _mali_osk_dbgmsg( "           %s()%4d\n", __FUNCTION__, __LINE__) ; \
-        _mali_osk_dbgmsg args ; \
-        _mali_osk_dbgmsg("\n"); \
+               _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+               _mali_osk_dbgmsg( "           %s()%4d\n", __FUNCTION__, __LINE__) ; \
+               _mali_osk_dbgmsg args ; \
+               _mali_osk_dbgmsg("\n"); \
        } while(0)
 
 #define MSG(args) do{ /* args should be in brackets */ \
-                _mali_osk_dbgmsg("UMP: "); \
-                _mali_osk_dbgmsg args; \
-               } while (0)
+               _mali_osk_dbgmsg("UMP: "); \
+               _mali_osk_dbgmsg args; \
+       } while (0)
 
 
 
@@ -75,7 +76,7 @@ typedef struct ump_session_data {
        _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
        int api_version;
        _mali_osk_mutex_t *lock;
-       ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+       ump_descriptor_mapping *cookies_map;  /**< Secure mapping of cookies from _ump_ukk_map_mem() */
        int cache_operations_ongoing;
        int has_pending_level1_cache_flush;
 } ump_session_data;
@@ -88,7 +89,7 @@ typedef struct ump_session_data {
  * which don't do it themself (e.g. due to a crash or premature termination).
  */
 typedef struct ump_session_memory_list_element {
-       struct ump_dd_mem * mem;
+       struct ump_dd_mem *mem;
        _mali_osk_list_t list;
 } ump_session_memory_list_element;
 
@@ -98,9 +99,8 @@ typedef struct ump_session_memory_list_element {
  * Device specific data, created when device driver is loaded, and then kept as the global variable device.
  */
 typedef struct ump_dev {
-       _mali_osk_mutex_t *secure_id_map_lock;
-       ump_descriptor_mapping * secure_id_map;
-       ump_memory_backend * backend;
+       ump_random_mapping *secure_id_map;
+       ump_memory_backend *backend;
 } ump_dev;
 
 
@@ -110,7 +110,7 @@ extern struct ump_dev device;
 
 _mali_osk_errcode_t ump_kernel_constructor(void);
 void ump_kernel_destructor(void);
-int map_errcode( _mali_osk_errcode_t err );
+int map_errcode(_mali_osk_errcode_t err);
 
 /**
  * variables from user space cannot be dereferenced from kernel space; tagging them
index 8863ff70399d0350f1ca7b094953757d642c06f8..d4c80175c68c14e95bc407fd25e70be4ea768138 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
  * @param count Number of mappings in the table
  * @return Pointer to a new table, NULL on error
  */
-static ump_descriptor_table * descriptor_table_alloc(int count);
+static ump_descriptor_table *descriptor_table_alloc(int count);
 
 /**
  * Free a descriptor table
  * @param table The table to free
  */
-static void descriptor_table_free(ump_descriptor_table * table);
+static void descriptor_table_free(ump_descriptor_table *table);
 
-ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries)
 {
-       ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+       ump_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping));
 
        init_entries = MALI_PAD_INT(init_entries);
        max_entries = MALI_PAD_INT(max_entries);
@@ -40,7 +40,7 @@ ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max
                map->table = descriptor_table_alloc(init_entries);
                if (NULL != map->table) {
                        map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
-                       if ( NULL != map->lock ) {
+                       if (NULL != map->lock) {
                                _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
                                map->max_nr_mappings_allowed = max_entries;
                                map->current_nr_mappings = init_entries;
@@ -53,14 +53,14 @@ ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max
        return NULL;
 }
 
-void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map)
 {
        descriptor_table_free(map->table);
        _mali_osk_mutex_rw_term(map->lock);
        _mali_osk_free(map);
 }
 
-int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target)
 {
        int descriptor = -1;/*-EFAULT;*/
        _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
@@ -68,9 +68,9 @@ int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void *
        if (descriptor == map->current_nr_mappings) {
                int nr_mappings_new;
                /* no free descriptor, try to expand the table */
-               ump_descriptor_table * new_table;
-               ump_descriptor_table * old_table = map->table;
-               nr_mappings_new= map->current_nr_mappings *2;
+               ump_descriptor_table *new_table;
+               ump_descriptor_table *old_table = map->table;
+               nr_mappings_new = map->current_nr_mappings * 2;
 
                if (map->current_nr_mappings >= map->max_nr_mappings_allowed) {
                        descriptor = -1;
@@ -84,7 +84,7 @@ int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void *
                }
 
                _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
-               _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+               _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void *));
                map->table = new_table;
                map->current_nr_mappings = nr_mappings_new;
                descriptor_table_free(old_table);
@@ -99,7 +99,7 @@ unlock_and_exit:
        return descriptor;
 }
 
-int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target)
 {
        int result = -1;/*-EFAULT;*/
        DEBUG_ASSERT(map);
@@ -112,7 +112,7 @@ int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, voi
        return result;
 }
 
-int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target)
 {
        int result = -1;/*-EFAULT;*/
        _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
@@ -124,7 +124,7 @@ int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, voi
        return result;
 }
 
-void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor)
 {
        _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
        if ((descriptor > 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
@@ -134,21 +134,21 @@ void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
        _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
 }
 
-static ump_descriptor_table * descriptor_table_alloc(int count)
+static ump_descriptor_table *descriptor_table_alloc(int count)
 {
-       ump_descriptor_table * table;
+       ump_descriptor_table *table;
 
-       table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+       table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));
 
        if (NULL != table) {
-               table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
-               table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+               table->usage = (u32 *)((u8 *)table + sizeof(ump_descriptor_table));
+               table->mappings = (void **)((u8 *)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
        }
 
        return table;
 }
 
-static void descriptor_table_free(ump_descriptor_table * table)
+static void descriptor_table_free(ump_descriptor_table *table)
 {
        _mali_osk_free(table);
 }
index 234c6fc51da97665ca2c3d4df72de163ef6a16be..1cc77e00e4190701cd358e65d6c97980569953df 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -21,8 +21,8 @@
  * The actual descriptor mapping table, never directly accessed by clients
  */
 typedef struct ump_descriptor_table {
-       u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
-       void** mappings; /**< Array of the pointers the descriptors map to */
+       u32 *usage;  /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+       void **mappings; /**< Array of the pointers the descriptors map to */
 } ump_descriptor_table;
 
 /**
@@ -33,7 +33,7 @@ typedef struct ump_descriptor_mapping {
        _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
        int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
        int current_nr_mappings; /**< Current number of possible mappings */
-       ump_descriptor_table * table; /**< Pointer to the current mapping table */
+       ump_descriptor_table *table;  /**< Pointer to the current mapping table */
 } ump_descriptor_mapping;
 
 /**
@@ -43,13 +43,13 @@ typedef struct ump_descriptor_mapping {
  * @param max_entries Number of entries to max support
  * @return Pointer to a descriptor mapping object, NULL on failure
  */
-ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries);
 
 /**
  * Destroy a descriptor mapping object
  * @param map The map to free
  */
-void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map);
 
 /**
  * Allocate a new mapping entry (descriptor ID)
@@ -58,7 +58,7 @@ void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
  * @param target The value to map to
  * @return The descriptor allocated, a negative value on error
  */
-int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target);
 
 /**
  * Get the value mapped to by a descriptor ID
@@ -67,7 +67,7 @@ int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void *
  * @param target Pointer to a pointer which will receive the stored value
  * @return 0 on successful lookup, negative on error
  */
-int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target);
 
 /**
  * Set the value mapped to by a descriptor ID
@@ -76,7 +76,7 @@ int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, voi
  * @param target Pointer to replace the current value with
  * @return 0 on successful lookup, negative on error
  */
-int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target);
 
 /**
  * Free the descriptor ID
@@ -84,6 +84,6 @@ int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, voi
  * @param map The map to free the descriptor from
  * @param descriptor The descriptor ID to free
  */
-void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor);
 
 #endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
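Taken together, these declarations form a small integer-handle namespace: allocate_mapping binds a pointer to a fresh descriptor ID, get and set dereference it, and free recycles it. A hedged usage sketch against the signatures above (kernel context assumed, not a standalone program; my_object is a placeholder):

    ump_descriptor_mapping *map = ump_descriptor_mapping_create(64, 1024);

    if (NULL != map) {
            int id = ump_descriptor_mapping_allocate_mapping(map, my_object);

            if (id >= 0) {
                    void *out = NULL;

                    if (0 == ump_descriptor_mapping_get(map, id, &out)) {
                            /* out now equals my_object */
                    }
                    ump_descriptor_mapping_free(map, id);
            }
            ump_descriptor_mapping_destroy(map);
    }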
index ad35b6f91853bfd975a4c4ef96dad2cec96b0084..3163756fa3e1d4cf2afae884732baf1d6a098977 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 
 typedef struct ump_memory_allocation {
-       void                    * phys_addr;
-       void                    * mapping;
+       void                     *phys_addr;
+       void                     *mapping;
        unsigned long             size;
        ump_dd_handle             handle;
-       void                    * process_mapping_info;
+       void                     *process_mapping_info;
        u32                       cookie;               /**< necessary on some U/K interface implementations */
-       struct ump_session_data * ump_session;          /**< Session that this allocation belongs to */
+       struct ump_session_data *ump_session;           /**< Session that this allocation belongs to */
        _mali_osk_list_t          list;                 /**< List for linking together memory allocations into the session's memory head */
        u32 is_cached;
 } ump_memory_allocation;
 
 typedef struct ump_memory_backend {
-       int  (*allocate)(void* ctx, ump_dd_mem * descriptor);
-       void (*release)(void* ctx, ump_dd_mem * descriptor);
-       void (*shutdown)(struct ump_memory_backend * backend);
-       u32  (*stat)(struct ump_memory_backend *backend);
-       int  (*pre_allocate_physical_check)(void *ctx, u32 size);
-       u32  (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
-       void * ctx;
+       int (*allocate)(void *ctx, ump_dd_mem *descriptor);
+       void (*release)(void *ctx, ump_dd_mem *descriptor);
+       void (*shutdown)(struct ump_memory_backend *backend);
+       u32(*stat)(struct ump_memory_backend *backend);
+       int (*pre_allocate_physical_check)(void *ctx, u32 size);
+       u32(*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+       void *ctx;
 } ump_memory_backend;
 
-ump_memory_backend * ump_memory_backend_create ( void );
-void ump_memory_backend_destroy( void );
+ump_memory_backend *ump_memory_backend_create(void);
+void ump_memory_backend_destroy(void);
 
 #endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
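ump_memory_backend is a plain function-pointer vtable: callers invoke allocate, release and stat through the struct without knowing whether the dedicated block allocator or the OS page allocator sits behind ctx. A standalone illustration of the same dispatch pattern (all names invented for the demo; the driver's allocate likewise returns nonzero on success):

    #include <stdio.h>

    struct backend {
            int  (*allocate)(void *ctx, unsigned long bytes);
            void *ctx;
    };

    static int fake_allocate(void *ctx, unsigned long bytes)
    {
            printf("%s backend: %lu bytes\n", (const char *)ctx, bytes);
            return 1;                   /* nonzero means success */
    }

    int main(void)
    {
            struct backend os = { fake_allocate, "os" };

            /* core code sees only the vtable, as in device.backend->allocate() */
            if (os.allocate(os.ctx, 4096UL))
                    printf("allocation ok\n");
            return 0;
    }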
 
index 30e04e8373a311113ade70c55e14c03a15b6470a..7ad08ef3bd560c2b5957ece009b8e138c6335b0e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #define UMP_MINIMUM_SIZE_MASK    (~(UMP_MINIMUM_SIZE-1))
 #define UMP_SIZE_ALIGN(x)        (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
 #define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
-static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor);
 
-UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block *blocks, unsigned long num_blocks)
 {
-       ump_dd_mem * mem;
+       ump_dd_mem *mem;
        unsigned long size_total = 0;
-       int map_id;
+       int ret;
        u32 i;
 
        /* Go through the input blocks and verify that they are sane */
-       for (i=0; i < num_blocks; i++) {
+       for (i = 0; i < num_blocks; i++) {
                unsigned long addr = blocks[i].addr;
                unsigned long size = blocks[i].size;
 
@@ -56,22 +56,9 @@ UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd
                return UMP_DD_HANDLE_INVALID;
        }
 
-       /* Find a secure ID for this allocation */
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
-
-       if (map_id < 0) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               _mali_osk_free(mem);
-               DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
-               return UMP_DD_HANDLE_INVALID;
-       }
-
        /* Now, make a copy of the block information supplied by the user */
-       mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+       mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block) * num_blocks);
        if (NULL == mem->block_array) {
-               ump_descriptor_mapping_free(device.secure_id_map, map_id);
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
                _mali_osk_free(mem);
                DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
                return UMP_DD_HANDLE_INVALID;
@@ -81,7 +68,6 @@ UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd
 
        /* And setup the rest of the ump_dd_mem struct */
        _mali_osk_atomic_init(&mem->ref_count, 1);
-       mem->secure_id = (ump_secure_id)map_id;
        mem->size_bytes = size_total;
        mem->nr_blocks = num_blocks;
        mem->backend_info = NULL;
@@ -92,76 +78,75 @@ UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd
        mem->hw_device = _UMP_UK_USED_BY_CPU;
        mem->lock_usage = UMP_NOT_LOCKED;
 
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
+       /* Find a secure ID for this allocation */
+       ret = ump_random_mapping_insert(device.secure_id_map, mem);
+       if (unlikely(ret)) {
+               _mali_osk_free(mem->block_array);
+               _mali_osk_free(mem);
+               DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+               return UMP_DD_HANDLE_INVALID;
+       }
+
        DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
 
        return (ump_dd_handle)mem;
 }
 
-static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor)
 {
        _mali_osk_free(descriptor->block_array);
        descriptor->block_array = NULL;
 }
 
-_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
 {
-       ump_session_data * session_data = NULL;
+       ump_session_data *session_data = NULL;
        ump_dd_mem *new_allocation = NULL;
-       ump_session_memory_list_element * session_memory_element = NULL;
-       int map_id;
+       ump_session_memory_list_element *session_memory_element = NULL;
+       int ret;
 
-       DEBUG_ASSERT_POINTER( user_interaction );
-       DEBUG_ASSERT_POINTER( user_interaction->ctx );
+       DEBUG_ASSERT_POINTER(user_interaction);
+       DEBUG_ASSERT_POINTER(user_interaction->ctx);
 
        session_data = (ump_session_data *) user_interaction->ctx;
 
-       session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+       session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
        if (NULL == session_memory_element) {
                DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
 
 
-       new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
-       if (NULL==new_allocation) {
+       new_allocation = _mali_osk_calloc(1, sizeof(ump_dd_mem));
+       if (NULL == new_allocation) {
                _mali_osk_free(session_memory_element);
                DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
                return _MALI_OSK_ERR_NOMEM;
        }
 
-       /* Create a secure ID for this allocation */
-       _mali_osk_mutex_wait(device.secure_id_map_lock);
-       map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
-
-       if (map_id < 0) {
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
-               _mali_osk_free(session_memory_element);
-               _mali_osk_free(new_allocation);
-               DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
-               return - _MALI_OSK_ERR_INVALID_FUNC;
-       }
-
+       /* Initialize the part of the new_allocation that we know so far */
-       new_allocation->secure_id = (ump_secure_id)map_id;
-       _mali_osk_atomic_init(&new_allocation->ref_count,1);
-       if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+       _mali_osk_atomic_init(&new_allocation->ref_count, 1);
+       if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
                new_allocation->is_cached = 0;
        else new_allocation->is_cached = 1;
 
-       /* special case a size of 0, we should try to emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferences */
+       /* Special case a size of 0, we should try to emulate what malloc does
+        * in this case, which is to return a valid pointer that must be freed,
+        * but can't be dereferenced */
        if (0 == user_interaction->size) {
-               user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+               /* Emulate by actually allocating the minimum block size */
+               user_interaction->size = 1;
        }
 
-       new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+       /* Page align the size */
+       new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size);
        new_allocation->lock_usage = UMP_NOT_LOCKED;
 
        /* Now, ask the active memory backend to do the actual memory allocation */
-       if (!device.backend->allocate( device.backend->ctx, new_allocation ) ) {
-               DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
-               ump_descriptor_mapping_free(device.secure_id_map, map_id);
-               _mali_osk_mutex_signal(device.secure_id_map_lock);
+       if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
+               DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
+                           new_allocation->size_bytes,
+                           (unsigned long)user_interaction->size));
                _mali_osk_free(new_allocation);
                _mali_osk_free(session_memory_element);
                return _MALI_OSK_ERR_INVALID_FUNC;
@@ -170,17 +155,27 @@ _mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
        new_allocation->ctx = device.backend->ctx;
        new_allocation->release_func = device.backend->release;
 
-       _mali_osk_mutex_signal(device.secure_id_map_lock);
-
        /* Initialize the session_memory_element, and add it to the session object */
        session_memory_element->mem = new_allocation;
        _mali_osk_mutex_wait(session_data->lock);
        _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
        _mali_osk_mutex_signal(session_data->lock);
 
+       /* Create a secure ID for this allocation */
+       ret = ump_random_mapping_insert(device.secure_id_map, new_allocation);
+       if (unlikely(ret)) {
+               new_allocation->release_func(new_allocation->ctx, new_allocation);
+               _mali_osk_free(session_memory_element);
+               _mali_osk_free(new_allocation);
+               DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+               return _MALI_OSK_ERR_INVALID_FUNC;
+       }
+
        user_interaction->secure_id = new_allocation->secure_id;
        user_interaction->size = new_allocation->size_bytes;
-       DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+       DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
+                   new_allocation->secure_id,
+                   new_allocation->size_bytes));
 
        return _MALI_OSK_ERR_OK;
 }
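Two things are worth noting in the rewritten allocator. First, the secure ID is now inserted only after the backend allocation and session bookkeeping succeed, so every failure path before that point unwinds with plain _mali_osk_free() calls and no ID ever needs revoking. Second, the requested size is page-aligned with UMP_SIZE_ALIGN; a standalone check of that macro's arithmetic, assuming the 4 KiB UMP_MINIMUM_SIZE (the value is defined earlier in this file and not shown in the diff):

    #include <assert.h>

    #define UMP_MINIMUM_SIZE         4096UL   /* assumed value */
    #define UMP_MINIMUM_SIZE_MASK    (~(UMP_MINIMUM_SIZE - 1))
    #define UMP_SIZE_ALIGN(x)        (((x) + UMP_MINIMUM_SIZE - 1) & UMP_MINIMUM_SIZE_MASK)

    int main(void)
    {
            assert(UMP_SIZE_ALIGN(1)    == 4096);   /* a 0-byte request is bumped to 1 first */
            assert(UMP_SIZE_ALIGN(4096) == 4096);   /* aligned sizes pass through */
            assert(UMP_SIZE_ALIGN(4097) == 8192);   /* everything else rounds up a page */
            return 0;
    }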
index 115c06ed27e0bec32b35bd342b3506558b71d7f2..04c4d10f55c71d0bacafe695009c89aae8037efd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "ump_kernel_interface.h"
 #include "mali_osk.h"
 
+#include <linux/rbtree.h>
 
 typedef enum {
        UMP_USED_BY_CPU = 0,
        UMP_USED_BY_MALI = 1,
-       UMP_USED_BY_UNKNOWN_DEVICE= 100,
+       UMP_USED_BY_UNKNOWN_DEVICE = 100,
 } ump_hw_usage;
 
 typedef enum {
@@ -27,19 +28,19 @@ typedef enum {
        UMP_READ_WRITE = 3,
 } ump_lock_usage;
 
-
 /*
  * This struct is what is "behind" a ump_dd_handle
  */
 typedef struct ump_dd_mem {
+       struct rb_node node;
        ump_secure_id secure_id;
        _mali_osk_atomic_t ref_count;
        unsigned long size_bytes;
        unsigned long nr_blocks;
-       ump_dd_physical_block * block_array;
-       void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
-       void * ctx;
-       void * backend_info;
+       ump_dd_physical_block *block_array;
+       void (*release_func)(void *ctx, struct ump_dd_mem *descriptor);
+       void *ctx;
+       void *backend_info;
        int is_cached;
        ump_hw_usage hw_device;
        ump_lock_usage lock_usage;
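Embedding struct rb_node directly in ump_dd_mem makes the allocation itself the tree entry: a lookup walks rb_node pointers and then recovers the containing struct with container_of, with no separate index records to allocate. A minimal userspace illustration of that embedding idiom (a two-pointer stand-in replaces the kernel's struct rb_node):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct node { struct node *left, *right; }; /* stand-in for struct rb_node */
    struct mem  { struct node n; unsigned int secure_id; };

    int main(void)
    {
            struct mem m = { { NULL, NULL }, 42u };
            struct node *p = &m.n;              /* what a tree traversal hands back */
            struct mem *back = container_of(p, struct mem, n);

            printf("secure_id = %u\n", back->secure_id); /* prints 42 */
            return 0;
    }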
index a146c5b555dff407f8db012848447911121a9dd5..66d5a202eff2354f84ba6c455f7d4ed393a537c4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 extern "C" {
 #endif
 
-_mali_osk_errcode_t _ump_osk_init( void );
+_mali_osk_errcode_t _ump_osk_init(void);
 
-_mali_osk_errcode_t _ump_osk_term( void );
+_mali_osk_errcode_t _ump_osk_term(void);
 
-int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom);
 
-int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom);
 
-_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor);
 
-_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size);
 
-void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor);
 
-void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data );
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data);
 
 #ifdef __cplusplus
 }
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_uk_types.h b/drivers/misc/mediatek/gpu/mt8127/mali/ump/common/ump_uk_types.h
deleted file mode 100644 (file)
index 3d65d17..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2010, 2012-2013 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-/**
- * @file ump_uk_types.h
- * Defines the types and constants used in the user-kernel interface
- */
-
-#ifndef __UMP_UK_TYPES_H__
-#define __UMP_UK_TYPES_H__
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* Helpers for API version handling */
-#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
-#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
-#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
-#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
-
-/**
- * API version define.
- * Indicates the version of the kernel API
- * The version is a 16bit integer incremented on each API change.
- * The 16bit integer is stored twice in a 32bit integer
- * So for version 1 the value would be 0x00010001
- */
-#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
-
-typedef enum
-{
-       _UMP_IOC_QUERY_API_VERSION = 1,
-       _UMP_IOC_ALLOCATE,
-       _UMP_IOC_RELEASE,
-       _UMP_IOC_SIZE_GET,
-       _UMP_IOC_MAP_MEM,    /* not used in Linux */
-       _UMP_IOC_UNMAP_MEM,  /* not used in Linux */
-       _UMP_IOC_MSYNC,
-       _UMP_IOC_CACHE_OPERATIONS_CONTROL,
-       _UMP_IOC_SWITCH_HW_USAGE,
-       _UMP_IOC_LOCK,
-       _UMP_IOC_UNLOCK,
-}_ump_uk_functions;
-
-typedef enum
-{
-       UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
-       UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
-       UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
-} ump_uk_alloc_constraints;
-
-typedef enum
-{
-       _UMP_UK_MSYNC_CLEAN = 0,
-       _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
-       _UMP_UK_MSYNC_INVALIDATE = 2,
-       _UMP_UK_MSYNC_FLUSH_L1   = 3,
-       _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
-} ump_uk_msync_op;
-
-typedef enum
-{
-       _UMP_UK_CACHE_OP_START = 0,
-       _UMP_UK_CACHE_OP_FINISH  = 1,
-} ump_uk_cache_op_control;
-
-typedef enum
-{
-       _UMP_UK_READ = 1,
-       _UMP_UK_READ_WRITE = 3,
-} ump_uk_lock_usage;
-
-typedef enum
-{
-       _UMP_UK_USED_BY_CPU = 0,
-       _UMP_UK_USED_BY_MALI = 1,
-       _UMP_UK_USED_BY_UNKNOWN_DEVICE= 100,
-} ump_uk_user;
-
-/**
- * Get API version ([in,out] u32 api_version, [out] u32 compatible)
- */
-typedef struct _ump_uk_api_version_s
-{
-       void *ctx;      /**< [in,out] user-kernel context (trashed on output) */
-       u32 version;    /**< Set to the user space version on entry, stores the device driver version on exit */
-       u32 compatible; /**< Non-null if the device is compatible with the client */
-} _ump_uk_api_version_s;
-
-/**
- * ALLOCATE ([out] u32 secure_id, [in,out] u32 size,  [in] contraints)
- */
-typedef struct _ump_uk_allocate_s
-{
-       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;                          /**< Return value from DD to Userdriver */
-       u32 size;                               /**< Input and output. Requested size; input. Returned size; output */
-       ump_uk_alloc_constraints constraints;   /**< Only input to Devicedriver */
-} _ump_uk_allocate_s;
-
-/**
- * SIZE_GET ([in] u32 secure_id, [out]size )
- */
-typedef struct _ump_uk_size_get_s
-{
-       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;                          /**< Input to DD */
-       u32 size;                               /**< Returned size; output */
-} _ump_uk_size_get_s;
-
-/**
- * Release ([in] u32 secure_id)
- */
-typedef struct _ump_uk_release_s
-{
-       void *ctx;                              /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;                          /**< Input to DD */
-} _ump_uk_release_s;
-
-typedef struct _ump_uk_map_mem_s
-{
-       void *ctx;                      /**< [in,out] user-kernel context (trashed on output) */
-       void *mapping;                  /**< [out] Returns user-space virtual address for the mapping */
-       void *phys_addr;                /**< [in] physical address */
-       unsigned long size;             /**< [in] size */
-       u32 secure_id;                  /**< [in] secure_id to assign to mapping */
-       void * _ukk_private;            /**< Only used inside linux port between kernel frontend and common part to store vma */
-       u32 cookie;
-       u32 is_cached;            /**< [in,out] caching of CPU mappings */
-} _ump_uk_map_mem_s;
-
-typedef struct _ump_uk_unmap_mem_s
-{
-       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
-       void *mapping;
-       u32 size;
-       void * _ukk_private;
-       u32 cookie;
-} _ump_uk_unmap_mem_s;
-
-typedef struct _ump_uk_msync_s
-{
-       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
-       void *mapping;        /**< [in] mapping addr */
-       void *address;        /**< [in] flush start addr */
-       u32 size;             /**< [in] size to flush */
-       ump_uk_msync_op op;   /**< [in] flush operation */
-       u32 cookie;           /**< [in] cookie stored with reference to the kernel mapping internals */
-       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
-       u32 is_cached;        /**< [out] caching of CPU mappings */
-} _ump_uk_msync_s;
-
-typedef struct _ump_uk_cache_operations_control_s
-{
-       void *ctx;                   /**< [in,out] user-kernel context (trashed on output) */
-       ump_uk_cache_op_control op;  /**< [in] cache operations start/stop */
-} _ump_uk_cache_operations_control_s;
-
-
-typedef struct _ump_uk_switch_hw_usage_s
-{
-       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
-       ump_uk_user new_user;         /**< [in] cookie stored with reference to the kernel mapping internals */
-
-} _ump_uk_switch_hw_usage_s;
-
-typedef struct _ump_uk_lock_s
-{
-       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
-       ump_uk_lock_usage lock_usage;
-} _ump_uk_lock_s;
-
-typedef struct _ump_uk_unlock_s
-{
-       void *ctx;            /**< [in,out] user-kernel context (trashed on output) */
-       u32 secure_id;        /**< [in] secure_id that identifies the ump buffer */
-} _ump_uk_unlock_s;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __UMP_UK_TYPES_H__ */
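Before it was removed, this header's version helpers encoded a 16-bit API version twice within one 32-bit word, so a truncated or byte-mangled value fails the IS_VERSION_ID self-check. A standalone demonstration of the encoding, with the macros copied from the deleted file:

    #include <assert.h>

    #define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
    #define IS_VERSION_ID(x)   (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
    #define GET_VERSION(x)     (((x) >> 16UL) & 0xFFFF)

    int main(void)
    {
            unsigned long v2 = MAKE_VERSION_ID(2UL);

            assert(v2 == 0x00020002UL);           /* version 2, stored twice */
            assert(IS_VERSION_ID(v2));
            assert(GET_VERSION(v2) == 2UL);
            assert(!IS_VERSION_ID(0x00020003UL)); /* halves disagree: rejected */
            return 0;
    }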
index fbc02be823c5514a7dd000b7e65238b7241e8437..caa79e087215679ebb5adffc65e0e795101f3a09 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -25,33 +25,33 @@ extern "C" {
 #endif
 
 
-_mali_osk_errcode_t _ump_ukk_open( void** context );
+_mali_osk_errcode_t _ump_ukk_open(void **context);
 
-_mali_osk_errcode_t _ump_ukk_close( void** context );
+_mali_osk_errcode_t _ump_ukk_close(void **context);
 
-_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction);
 
-_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info);
 
-_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction);
 
-_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args);
 
-_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args);
 
-void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args);
 
-void _ump_ukk_msync( _ump_uk_msync_s *args );
+void _ump_ukk_msync(_ump_uk_msync_s *args);
 
-void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args);
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args);
 
-void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args );
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args);
 
-void _ump_ukk_lock(_ump_uk_lock_s *args );
+void _ump_ukk_lock(_ump_uk_lock_s *args);
 
-void _ump_ukk_unlock(_ump_uk_unlock_s *args );
+void _ump_ukk_unlock(_ump_uk_unlock_s *args);
 
-u32 _ump_ukk_report_memory_usage( void );
+u32 _ump_ukk_report_memory_usage(void);
 
 #ifdef __cplusplus
 }
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/license/gpl/ump_kernel_license.h b/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/license/gpl/ump_kernel_license.h
deleted file mode 100644 (file)
index 17b930d..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2010 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-/**
- * @file ump_kernel_license.h
- * Defines for the macro MODULE_LICENSE.
- */
-
-#ifndef __UMP_KERNEL_LICENSE_H__
-#define __UMP_KERNEL_LICENSE_H__
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-#define UMP_KERNEL_LINUX_LICENSE     "GPL"
-#define UMP_LICENSE_IS_GPL 1
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __UMP_KERNEL_LICENSE_H__ */
index 93ac8082bf13993b191abde734b950fb0bccfe48..766abef3dc983bf33c8b4c2dec5a451082d82d15 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2010-2011, 2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 326ff27597592bec5431eb19539ec17bf2293a42..b0cb7dbf7902a8204d3d9f8c0dc5ecd29d73d19d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 8d15ecd77653b8ae18d7001f587e1be956fe3793..83ac7746b3b430a1399ede6eeea490260f6b7160 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -67,7 +67,7 @@ typedef struct ump_vma_usage_tracker {
 struct ump_device {
        struct cdev cdev;
 #if UMP_LICENSE_IS_GPL
-       struct class * ump_class;
+       struct class *ump_class;
 #endif
 };
 
@@ -83,7 +83,7 @@ static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long ar
 #else
 static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
 #endif
-static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma);
 
 
 /* This variable defines the file operations this UMP device driver offer */
@@ -192,7 +192,7 @@ int ump_kernel_device_initialize(void)
                        if (IS_ERR(ump_device.ump_class)) {
                                err = PTR_ERR(ump_device.ump_class);
                        } else {
-                               struct device * mdev;
+                               struct device *mdev;
                                mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
                                if (!IS_ERR(mdev)) {
                                        return 0;
@@ -233,7 +233,7 @@ void ump_kernel_device_terminate(void)
        unregister_chrdev_region(dev, 1);
 
 #if UMP_LICENSE_IS_GPL
-       if(ump_debugfs_dir)
+       if (ump_debugfs_dir)
                debugfs_remove_recursive(ump_debugfs_dir);
 #endif
 }
@@ -243,7 +243,7 @@ void ump_kernel_device_terminate(void)
  */
 static int ump_file_open(struct inode *inode, struct file *filp)
 {
-       struct ump_session_data * session_data;
+       struct ump_session_data *session_data;
        _mali_osk_errcode_t err;
 
        /* input validation */
@@ -253,13 +253,13 @@ static int ump_file_open(struct inode *inode, struct file *filp)
        }
 
        /* Call the OS-Independent UMP Open function */
-       err = _ump_ukk_open((void**) &session_data );
-       if( _MALI_OSK_ERR_OK != err ) {
+       err = _ump_ukk_open((void **) &session_data);
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("Ump failed to open a new session\n"));
-               return map_errcode( err );
+               return map_errcode(err);
        }
 
-       filp->private_data = (void*)session_data;
+       filp->private_data = (void *)session_data;
        filp->f_pos = 0;
 
        return 0; /* success */
@@ -274,9 +274,9 @@ static int ump_file_release(struct inode *inode, struct file *filp)
 {
        _mali_osk_errcode_t err;
 
-       err = _ump_ukk_close((void**) &filp->private_data );
-       if( _MALI_OSK_ERR_OK != err ) {
-               return map_errcode( err );
+       err = _ump_ukk_close((void **) &filp->private_data);
+       if (_MALI_OSK_ERR_OK != err) {
+               return map_errcode(err);
        }
 
        return 0;  /* success */
@@ -294,8 +294,8 @@ static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int c
 #endif
 {
        int err = -ENOTTY;
-       void __user * argument;
-       struct ump_session_data * session_data;
+       void __user *argument;
+       struct ump_session_data *session_data;
 
 #ifndef HAVE_UNLOCKED_IOCTL
        (void)inode; /* inode not used */
@@ -356,9 +356,9 @@ static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int c
        return err;
 }
 
-int map_errcode( _mali_osk_errcode_t err )
+int map_errcode(_mali_osk_errcode_t err)
 {
-       switch(err) {
+       switch (err) {
        case _MALI_OSK_ERR_OK :
                return 0;
        case _MALI_OSK_ERR_FAULT:
@@ -383,11 +383,11 @@ int map_errcode( _mali_osk_errcode_t err )
 /*
  * Handle from OS to map specified virtual memory to specified UMP memory.
  */
-static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        _ump_uk_map_mem_s args;
        _mali_osk_errcode_t err;
-       struct ump_session_data * session_data;
+       struct ump_session_data *session_data;
 
        /* Validate the session data */
        session_data = (struct ump_session_data *)filp->private_data;
@@ -412,13 +412,13 @@ static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
        /* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
        vma->vm_flags |= VM_DONTCOPY;
 
-       DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+       DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags));
 
        /* Call the common mmap handler */
-       err = _ump_ukk_map_mem( &args );
-       if ( _MALI_OSK_ERR_OK != err) {
+       err = _ump_ukk_map_mem(&args);
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
-               return map_errcode( err );
+               return map_errcode(err);
        }
 
        return 0; /* success */
index 5f302cf97d5fabf273b1cb344d8829656195830f..a0cca3d185d0c5c01d29bed89a9ff687275708ee 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
+ * (C) COPYRIGHT 2007-2013, 2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
index 4f41d1b24db421ce3559ef91e1db4314505e9f8b..6b6192ca283b6716f867b7c8fbf5a6f77915e2a0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 
 
 typedef struct block_info {
-       struct block_info * next;
+       struct block_info *next;
 } block_info;
 
 
 
 typedef struct block_allocator {
        struct semaphore mutex;
-       block_info * all_blocks;
-       block_info * first_free;
+       block_info *all_blocks;
+       block_info *first_free;
        u32 base;
        u32 num_blocks;
        u32 num_free;
 } block_allocator;
 
 
-static void block_allocator_shutdown(ump_memory_backend * backend);
-static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
-static void block_allocator_release(void * ctx, ump_dd_mem * handle);
-static inline u32 get_phys(block_allocator * allocator, block_info * block);
+static void block_allocator_shutdown(ump_memory_backend *backend);
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem);
+static void block_allocator_release(void *ctx, ump_dd_mem *handle);
+static inline u32 get_phys(block_allocator *allocator, block_info *block);
 static u32 block_allocator_stat(struct ump_memory_backend *backend);
 
 
@@ -57,10 +57,10 @@ static u32 block_allocator_stat(struct ump_memory_backend *backend);
 /*
  * Create dedicated memory backend
  */
-ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size)
 {
-       ump_memory_backend * backend;
-       block_allocator * allocator;
+       ump_memory_backend *backend;
+       block_allocator *allocator;
        u32 usable_size;
        u32 num_blocks;
 
@@ -117,14 +117,14 @@ ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
 /*
  * Destroy specified dedicated memory backend
  */
-static void block_allocator_shutdown(ump_memory_backend * backend)
+static void block_allocator_shutdown(ump_memory_backend *backend)
 {
-       block_allocator * allocator;
+       block_allocator *allocator;
 
        BUG_ON(!backend);
        BUG_ON(!backend->ctx);
 
-       allocator = (block_allocator*)backend->ctx;
+       allocator = (block_allocator *)backend->ctx;
 
        DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
 
@@ -135,24 +135,24 @@ static void block_allocator_shutdown(ump_memory_backend * backend)
 
 
 
-static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem)
 {
-       block_allocator * allocator;
+       block_allocator *allocator;
        u32 left;
-       block_info * last_allocated = NULL;
+       block_info *last_allocated = NULL;
        int i = 0;
 
        BUG_ON(!ctx);
        BUG_ON(!mem);
 
-       allocator = (block_allocator*)ctx;
+       allocator = (block_allocator *)ctx;
        left = mem->size_bytes;
 
        BUG_ON(!left);
        BUG_ON(!&allocator->mutex);
 
        mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
-       mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+       mem->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
        if (NULL == mem->block_array) {
                MSG_ERR(("Failed to allocate block array\n"));
                return 0;
@@ -166,7 +166,7 @@ static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
        mem->size_bytes = 0;
 
        while ((left > 0) && (allocator->first_free)) {
-               block_info * block;
+               block_info *block;
 
                block = allocator->first_free;
                allocator->first_free = allocator->first_free->next;
@@ -185,7 +185,7 @@ static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
        }
 
        if (left) {
-               block_info * block;
+               block_info *block;
                /* release all memory back to the pool */
                while (last_allocated) {
                        block = last_allocated->next;
@@ -208,23 +208,23 @@ static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
        mem->backend_info = last_allocated;
 
        up(&allocator->mutex);
-       mem->is_cached=0;
+       mem->is_cached = 0;
 
        return 1;
 }
 
 
 
-static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+static void block_allocator_release(void *ctx, ump_dd_mem *handle)
 {
-       block_allocator * allocator;
-       block_info * block, * next;
+       block_allocator *allocator;
+       block_info *block, * next;
 
        BUG_ON(!ctx);
        BUG_ON(!handle);
 
-       allocator = (block_allocator*)ctx;
-       block = (block_info*)handle->backend_info;
+       allocator = (block_allocator *)ctx;
+       block = (block_info *)handle->backend_info;
        BUG_ON(!block);
 
        if (down_interruptible(&allocator->mutex)) {
@@ -235,7 +235,7 @@ static void block_allocator_release(void * ctx, ump_dd_mem * handle)
        while (block) {
                next = block->next;
 
-               BUG_ON( (block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
+               BUG_ON((block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
 
                block->next = allocator->first_free;
                allocator->first_free = block;
@@ -255,7 +255,7 @@ static void block_allocator_release(void * ctx, ump_dd_mem * handle)
 /*
  * Helper function for calculating the physical base address of a memory block
  */
-static inline u32 get_phys(block_allocator * allocator, block_info * block)
+static inline u32 get_phys(block_allocator *allocator, block_info *block)
 {
        return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
 }
@@ -264,8 +264,8 @@ static u32 block_allocator_stat(struct ump_memory_backend *backend)
 {
        block_allocator *allocator;
        BUG_ON(!backend);
-       allocator = (block_allocator*)backend->ctx;
+       allocator = (block_allocator *)backend->ctx;
        BUG_ON(!allocator);
 
-       return (allocator->num_blocks - allocator->num_free)* UMP_BLOCK_SIZE;
+       return (allocator->num_blocks - allocator->num_free) * UMP_BLOCK_SIZE;
 }
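get_phys recovers a block's physical address by pure pointer arithmetic: the block's index within the all_blocks array, scaled by the fixed block size and offset from the base. A standalone check of that mapping (UMP_BLOCK_SIZE is defined elsewhere in this file; 256 KiB is assumed here):

    #include <assert.h>

    #define UMP_BLOCK_SIZE (256UL * 1024UL)     /* assumed value */

    typedef struct block_info { struct block_info *next; } block_info;

    static unsigned long get_phys(unsigned long base, block_info *all_blocks,
                                  block_info *block)
    {
            /* pointer subtraction yields the block index */
            return base + (unsigned long)(block - all_blocks) * UMP_BLOCK_SIZE;
    }

    int main(void)
    {
            block_info blocks[4];

            assert(get_phys(0x80000000UL, blocks, &blocks[0]) == 0x80000000UL);
            assert(get_phys(0x80000000UL, blocks, &blocks[3])
                   == 0x80000000UL + 3 * UMP_BLOCK_SIZE);
            return 0;
    }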
index dbd5356d41308c94419c59254da50de647ea7d4f..abf66a9c443728c1e1d078b69988dd32dee0d850 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2014-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -17,7 +17,7 @@
 
 #include "ump_kernel_memory_backend.h"
 
-ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size);
 
 #endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
 
index 5e6fafd4929469fcf5c3f7d280490a8cc073fe82..5968224a77ea8e518351743cdaee287b501268a4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -36,9 +36,9 @@ typedef struct os_allocator {
 
 
 
-static void os_free(void* ctx, ump_dd_mem * descriptor);
-static int os_allocate(void* ctx, ump_dd_mem * descriptor);
-static void os_memory_backend_destroy(ump_memory_backend * backend);
+static void os_free(void *ctx, ump_dd_mem *descriptor);
+static int os_allocate(void *ctx, ump_dd_mem *descriptor);
+static void os_memory_backend_destroy(ump_memory_backend *backend);
 static u32 os_stat(struct ump_memory_backend *backend);
 
 
@@ -46,10 +46,10 @@ static u32 os_stat(struct ump_memory_backend *backend);
 /*
  * Create OS memory backend
  */
-ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation)
 {
-       ump_memory_backend * backend;
-       os_allocator * info;
+       ump_memory_backend *backend;
+       os_allocator *info;
 
        info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
        if (NULL == info) {
@@ -83,9 +83,9 @@ ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
 /*
  * Destroy specified OS memory backend
  */
-static void os_memory_backend_destroy(ump_memory_backend * backend)
+static void os_memory_backend_destroy(ump_memory_backend *backend)
 {
-       os_allocator * info = (os_allocator*)backend->ctx;
+       os_allocator *info = (os_allocator *)backend->ctx;
 
        DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
 
@@ -98,17 +98,17 @@ static void os_memory_backend_destroy(ump_memory_backend * backend)
 /*
  * Allocate UMP memory
  */
-static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+static int os_allocate(void *ctx, ump_dd_mem *descriptor)
 {
        u32 left;
-       os_allocator * info;
+       os_allocator *info;
        int pages_allocated = 0;
        int is_cached;
 
        BUG_ON(!descriptor);
        BUG_ON(!ctx);
 
-       info = (os_allocator*)ctx;
+       info = (os_allocator *)ctx;
        left = descriptor->size_bytes;
        is_cached = descriptor->is_cached;
 
@@ -130,7 +130,7 @@ static int os_allocate(void* ctx, ump_dd_mem * descriptor)
        }
 
        while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max)) {
-               struct page * new_page;
+               struct page *new_page;
 
                if (is_cached) {
                        new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
@@ -142,11 +142,11 @@ static int os_allocate(void* ctx, ump_dd_mem * descriptor)
                }
 
                /* Ensure page caches are flushed. */
-               if ( is_cached ) {
+               if (is_cached) {
                        descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
                        descriptor->block_array[pages_allocated].size = PAGE_SIZE;
                } else {
-                       descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+                       descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
                        descriptor->block_array[pages_allocated].size = PAGE_SIZE;
                }
 
@@ -166,12 +166,12 @@ static int os_allocate(void* ctx, ump_dd_mem * descriptor)
        if (left) {
                DBG_MSG(1, ("Failed to allocate needed pages\n"));
 
-               while(pages_allocated) {
+               while (pages_allocated) {
                        pages_allocated--;
-                       if ( !is_cached ) {
+                       if (!is_cached) {
                                dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
                        }
-                       __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+                       __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT));
                }
 
                up(&info->mutex);
@@ -192,15 +192,15 @@ static int os_allocate(void* ctx, ump_dd_mem * descriptor)
 /*
  * Free specified UMP memory
  */
-static void os_free(void* ctx, ump_dd_mem * descriptor)
+static void os_free(void *ctx, ump_dd_mem *descriptor)
 {
-       os_allocator * info;
+       os_allocator *info;
        int i;
 
        BUG_ON(!ctx);
        BUG_ON(!descriptor);
 
-       info = (os_allocator*)ctx;
+       info = (os_allocator *)ctx;
 
        BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
 
@@ -215,12 +215,12 @@ static void os_free(void* ctx, ump_dd_mem * descriptor)
 
        up(&info->mutex);
 
-       for ( i = 0; i < descriptor->nr_blocks; i++) {
+       for (i = 0; i < descriptor->nr_blocks; i++) {
                DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
-               if ( ! descriptor->is_cached) {
+               if (! descriptor->is_cached) {
                        dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
                }
-               __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+               __free_page(pfn_to_page(descriptor->block_array[i].addr >> PAGE_SHIFT));
        }
 
        vfree(descriptor->block_array);
@@ -230,6 +230,6 @@ static void os_free(void* ctx, ump_dd_mem * descriptor)
 static u32 os_stat(struct ump_memory_backend *backend)
 {
        os_allocator *info;
-       info = (os_allocator*)backend->ctx;
+       info = (os_allocator *)backend->ctx;
        return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
 }
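On free, the OS backend converts each stored physical address back to its struct page with pfn_to_page(addr >> PAGE_SHIFT): the shift strips the in-page offset and leaves the page frame number. A standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

    #include <assert.h>

    #define PAGE_SHIFT 12u                      /* assumed: 4 KiB pages */

    int main(void)
    {
            unsigned long phys = 0x1234000UL;   /* page-aligned physical address */
            unsigned long pfn  = phys >> PAGE_SHIFT;

            assert(pfn == 0x1234UL);             /* page frame number */
            assert((pfn << PAGE_SHIFT) == phys); /* round-trips for aligned addresses */
            return 0;
    }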
index 82b45c2435f5cb70971c00365a0057356e12e4a3..aee898046e39fe998861639bc0bd9e77d0352497 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2014-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -17,7 +17,7 @@
 
 #include "ump_kernel_memory_backend.h"
 
-ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation);
 
 #endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
 
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.c b/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.c
new file mode 100644 (file)
index 0000000..d20b7dd
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_types.h"
+#include "ump_kernel_random_mapping.h"
+
+#include <linux/random.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+
+
+static ump_dd_mem *search(struct rb_root *root, int id)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               ump_dd_mem *e = container_of(node, ump_dd_mem, node);
+
+               if (id < e->secure_id) {
+                       node = node->rb_left;
+               } else if (id > e->secure_id) {
+                       node = node->rb_right;
+               } else {
+                       return e;
+               }
+       }
+
+       return NULL;
+}
+
+static mali_bool insert(struct rb_root *root, int id, ump_dd_mem *mem)
+{
+       struct rb_node **new = &(root->rb_node);
+       struct rb_node *parent = NULL;
+
+       while (*new) {
+               ump_dd_mem *this = container_of(*new, ump_dd_mem, node);
+
+               parent = *new;
+               if (id < this->secure_id) {
+                       new = &((*new)->rb_left);
+               } else if (id > this->secure_id) {
+                       new = &((*new)->rb_right);
+               } else {
+                       pr_warn("UMP: ID already used %x\n", id);
+                       return MALI_FALSE;
+               }
+       }
+
+       rb_link_node(&mem->node, parent, new);
+       rb_insert_color(&mem->node, root);
+
+       return MALI_TRUE;
+}
+
+
+ump_random_mapping *ump_random_mapping_create(void)
+{
+       ump_random_mapping *map = _mali_osk_calloc(1, sizeof(ump_random_mapping));
+
+       if (NULL == map)
+               return NULL;
+
+       map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED,
+                                           _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+       if (NULL != map->lock) {
+               map->root = RB_ROOT;
+#if UMP_RANDOM_MAP_DELAY
+               map->failed.count = 0;
+               map->failed.timestamp = jiffies;
+#endif
+               return map;
+       }
+       return NULL;
+}
+
+void ump_random_mapping_destroy(ump_random_mapping *map)
+{
+       _mali_osk_mutex_rw_term(map->lock);
+       _mali_osk_free(map);
+}
+
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem)
+{
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       while (1) {
+               u32 id;
+
+               get_random_bytes(&id, sizeof(id));
+
+               /* Try a new random number if id happened to be the invalid
+                * secure ID (-1). */
+               if (unlikely(id == UMP_INVALID_SECURE_ID))
+                       continue;
+
+               /* Insert into the tree. If the id was already in use, get a
+                * new random id and try again. */
+               if (insert(&map->root, id, mem)) {
+                       mem->secure_id = id;
+                       break;
+               }
+       }
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       return 0;
+}
+
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id)
+{
+       ump_dd_mem *mem = NULL;
+#if UMP_RANDOM_MAP_DELAY
+       int do_delay = 0;
+#endif
+
+       DEBUG_ASSERT(map);
+
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+       mem = search(&map->root, id);
+
+       if (unlikely(NULL == mem)) {
+#if UMP_RANDOM_MAP_DELAY
+               map->failed.count++;
+
+               if (time_is_before_jiffies(map->failed.timestamp +
+                                          UMP_FAILED_LOOKUP_DELAY * HZ)) {
+                       /* If it is a long time since last failure, reset
+                        * the counter and skip the delay this time. */
+                       map->failed.count = 0;
+               } else if (map->failed.count > UMP_FAILED_LOOKUPS_ALLOWED) {
+                       do_delay = 1;
+               }
+
+               map->failed.timestamp = jiffies;
+#endif /* UMP_RANDOM_MAP_DELAY */
+       } else {
+               ump_dd_reference_add(mem);
+       }
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+
+#if UMP_RANDOM_MAP_DELAY
+       if (do_delay) {
+               /* Apply delay */
+               schedule_timeout_killable(UMP_FAILED_LOOKUP_DELAY);
+       }
+#endif /* UMP_RANDOM_MAP_DELAY */
+
+       return mem;
+}
+
+static ump_dd_mem *ump_random_mapping_remove_internal(ump_random_mapping *map, int id)
+{
+       ump_dd_mem *mem = NULL;
+
+       mem = search(&map->root, id);
+
+       if (mem) {
+               rb_erase(&mem->node, &map->root);
+       }
+
+       return mem;
+}
+
+void ump_random_mapping_put(ump_dd_mem *mem)
+{
+       int new_ref;
+
+       _mali_osk_mutex_rw_wait(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+       DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n",
+                   mem->secure_id, new_ref));
+
+       if (0 == new_ref) {
+               DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+               ump_random_mapping_remove_internal(device.secure_id_map, mem->secure_id);
+
+               mem->release_func(mem->ctx, mem);
+               _mali_osk_free(mem);
+       }
+
+       _mali_osk_mutex_rw_signal(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int descriptor)
+{
+       ump_dd_mem *mem;
+
+       _mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+       mem = ump_random_mapping_remove_internal(map, descriptor);
+       _mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+       return mem;
+}
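This new file is the core of the commit's UMP hardening: secure IDs are no longer small sequential descriptors but 32-bit values from get_random_bytes kept in an rbtree, and a burst of failed lookups (more than UMP_FAILED_LOOKUPS_ALLOWED inside the delay window) penalizes the calling thread with schedule_timeout_killable, making brute-force guessing of IDs slow. A hedged kernel-context sketch of the API as defined above (not a standalone program; mem is a populated ump_dd_mem and device.secure_id_map is the driver-wide map set up at init):

    if (0 == ump_random_mapping_insert(device.secure_id_map, mem)) {
            /* mem->secure_id now holds a random 32-bit ID
             * (the invalid ID -1 is re-rolled) */
            ump_dd_mem *found = ump_random_mapping_get(device.secure_id_map,
                                                       mem->secure_id);

            if (NULL != found)                     /* _get took a reference... */
                    ump_random_mapping_put(found); /* ..._put drops it again   */
    }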
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.h b/drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_random_mapping.h
new file mode 100644 (file)
index 0000000..bfdaf93
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * This confidential and proprietary software may be used only as
+ * authorised by a licensing agreement from ARM Limited
+ * (C) COPYRIGHT 2008-2011, 2013-2015 ARM Limited
+ * ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorised
+ * copies and copies may only be made to the extent permitted
+ * by a licensing agreement from ARM Limited.
+ */
+
+/**
+ * @file ump_kernel_random_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_RANDOM_MAPPING_H__
+#define __UMP_KERNEL_RANDOM_MAPPING_H__
+
+#include "mali_osk.h"
+#include <linux/rbtree.h>
+
+#define UMP_RANDOM_MAP_DELAY 1
+#define UMP_FAILED_LOOKUP_DELAY 10 /* ms */
+#define UMP_FAILED_LOOKUPS_ALLOWED 10 /* number of allowed failed lookups */
+
+/**
+ * The random mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_random_mapping {
+       _mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+       struct rb_root root;
+#if UMP_RANDOM_MAP_DELAY
+       struct {
+               unsigned long count;
+               unsigned long timestamp;
+       } failed;
+#endif
+} ump_random_mapping;
+
+/**
+ * Create a random mapping object
+ * Create a random mapping capable of holding 2^20 entries
+ * @return Pointer to a random mapping object, NULL on failure
+ */
+ump_random_mapping *ump_random_mapping_create(void);
+
+/**
+ * Destroy a random mapping object
+ * @param map The map to free
+ */
+void ump_random_mapping_destroy(ump_random_mapping *map);
+
+/**
+ * Allocate a new mapping entry (random ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param mem The memory object to map the new random ID to
+ * @return The random ID allocated; a negative value on error
+ */
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem);
+
+/**
+ * Get the value mapped to by a random ID
+ *
+ * If the lookup fails, punish the calling thread by applying a delay.
+ *
+ * @param map The map to look the random ID up in
+ * @param id The ID to look up
+ * @return ump_dd_mem pointer on successful lookup (with a reference added), NULL on error
+ */
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id);
+
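+/**
+ * Release a reference taken by ump_random_mapping_get()
+ * When the reference count drops to zero the ID is removed from the
+ * map and the memory object's release function is invoked.
+ * @param mem The memory object to drop a reference on
+ */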
+void ump_random_mapping_put(ump_dd_mem *mem);
+
+/**
+ * Free the random ID
+ * For the random ID to be reused it has to be freed
+ * @param map The map to remove the ID from
+ * @param id The ID to free
+ * @return The memory object the ID mapped to, or NULL if it was not found
+ */
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int id);
+
+#endif /* __UMP_KERNEL_RANDOM_MAPPING_H__ */
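
Taken together, the declarations above cover the whole secure-ID life cycle: insert hands out a random ID, get resolves an ID and takes a reference, put drops it, and remove detaches the ID for reuse. A hedged kernel-side usage sketch, assuming this header and the UMP type definitions are in scope; the function itself is hypothetical:

    #include "ump_kernel_random_mapping.h"

    /* Hypothetical caller; error handling is trimmed to the essentials. */
    static int example_secure_id_roundtrip(ump_random_mapping *map,
                                           ump_dd_mem *mem)
    {
            ump_dd_mem *found;
            int id;

            id = ump_random_mapping_insert(map, mem);  /* random ID, or < 0 */
            if (id < 0)
                    return id;

            found = ump_random_mapping_get(map, id);   /* adds a reference */
            if (found)
                    ump_random_mapping_put(found);     /* drops it again */

            ump_random_mapping_remove(map, id);        /* ID is now reusable */
            return 0;
    }
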
index 01c38e014426fc514714757a3907df23bdfc1dd1..44051be0bee1f18db724f9becb8b633d04ceed28 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -33,9 +33,9 @@ unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
 module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
 MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
 
-ump_memory_backend* ump_memory_backend_create ( void )
+ump_memory_backend *ump_memory_backend_create(void)
 {
-       ump_memory_backend * backend = NULL;
+       ump_memory_backend *backend = NULL;
 
        /* Create the dynamic memory allocator backend */
        if (0 == ump_backend) {
@@ -56,7 +56,7 @@ ump_memory_backend* ump_memory_backend_create ( void )
        return backend;
 }
 
-void ump_memory_backend_destroy( void )
+void ump_memory_backend_destroy(void)
 {
        if (0 == ump_backend) {
                DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
index 0d56ae08b5cda6467db8037708e806befa9c61c7..65321ac825f3821b31392576c1b8cb2eadffd905 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
+ * (C) COPYRIGHT 2008-2010, 2014-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
 #include "ump_osk.h"
 #include <asm/atomic.h>
 
-int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom)
 {
        return atomic_dec_return((atomic_t *)&atom->u.val);
 }
 
-int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom)
 {
        return atomic_inc_return((atomic_t *)&atom->u.val);
 }
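
These one-line wrappers expose the Linux return-new-value atomics through the OSK abstraction; ump_random_mapping_put() above depends on the value returned by the decrement to detect the final reference. A minimal C11 sketch of that dec-and-read release pattern (names are illustrative, not driver API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct refcounted {
            atomic_int ref_count;
    };

    static void ref_put(struct refcounted *obj)
    {
            /* atomic_fetch_sub() returns the old value; subtracting one
             * gives the new value, as atomic_dec_return() does. */
            int new_ref = atomic_fetch_sub(&obj->ref_count, 1) - 1;

            if (new_ref == 0)
                    free(obj);  /* final release, as in ump_random_mapping_put() */
    }

    int main(void)
    {
            struct refcounted *obj = malloc(sizeof(*obj));

            if (!obj)
                    return 1;
            atomic_init(&obj->ref_count, 2);
            ref_put(obj);  /* 2 -> 1: object stays alive */
            ref_put(obj);  /* 1 -> 0: object is freed */
            printf("done\n");
            return 0;
    }
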
index 4d5a72684167141dc7827f466de86f93d4f82e39..8031b44cd0d73258b2faa2153d5bc8d494eb455c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
+ * (C) COPYRIGHT 2008-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 
 #include <asm/memory.h>
-#include <asm/uaccess.h>                       /* to verify pointers from user space */
+#include <asm/uaccess.h>                        /* to verify pointers from user space */
 #include <asm/cacheflush.h>
 #include <linux/dma-mapping.h>
 
@@ -35,12 +35,12 @@ typedef struct ump_vma_usage_tracker {
        ump_memory_allocation *descriptor;
 } ump_vma_usage_tracker;
 
-static void ump_vma_open(struct vm_area_struct * vma);
-static void ump_vma_close(struct vm_area_struct * vma);
+static void ump_vma_open(struct vm_area_struct *vma);
+static void ump_vma_close(struct vm_area_struct *vma);
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
 static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
 #else
-static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
 #endif
 
 static struct vm_operations_struct ump_vm_ops = {
@@ -60,11 +60,11 @@ static struct vm_operations_struct ump_vm_ops = {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
 static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
 #else
-static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address)
 #endif
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
-       void __user * address;
+       void __user *address;
        address = vmf->virtual_address;
 #endif
        MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
@@ -77,12 +77,12 @@ static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, uns
 #endif
 }
 
-static void ump_vma_open(struct vm_area_struct * vma)
+static void ump_vma_open(struct vm_area_struct *vma)
 {
-       ump_vma_usage_tracker * vma_usage_tracker;
+       ump_vma_usage_tracker *vma_usage_tracker;
        int new_val;
 
-       vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+       vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
        BUG_ON(NULL == vma_usage_tracker);
 
        new_val = atomic_inc_return(&vma_usage_tracker->references);
@@ -90,13 +90,13 @@ static void ump_vma_open(struct vm_area_struct * vma)
        DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
 }
 
-static void ump_vma_close(struct vm_area_struct * vma)
+static void ump_vma_close(struct vm_area_struct *vma)
 {
-       ump_vma_usage_tracker * vma_usage_tracker;
+       ump_vma_usage_tracker *vma_usage_tracker;
        _ump_uk_unmap_mem_s args;
        int new_val;
 
-       vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+       vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
        BUG_ON(NULL == vma_usage_tracker);
 
        new_val = atomic_dec_return(&vma_usage_tracker->references);
@@ -104,7 +104,7 @@ static void ump_vma_close(struct vm_area_struct * vma)
        DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
 
        if (0 == new_val) {
-               ump_memory_allocation * descriptor;
+               ump_memory_allocation *descriptor;
 
                descriptor = vma_usage_tracker->descriptor;
 
@@ -116,15 +116,15 @@ static void ump_vma_close(struct vm_area_struct * vma)
                args._ukk_private = NULL; /** @note unused */
 
                DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
-               _ump_ukk_unmap_mem( & args );
+               _ump_ukk_unmap_mem(&args);
 
                /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
        }
 }
 
-_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor)
 {
-       ump_vma_usage_tracker * vma_usage_tracker;
+       ump_vma_usage_tracker *vma_usage_tracker;
        struct vm_area_struct *vma;
 
        if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
@@ -135,8 +135,8 @@ _mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descrip
                return -_MALI_OSK_ERR_FAULT;
        }
 
-       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
-       if (NULL == vma ) {
+       vma = (struct vm_area_struct *)descriptor->process_mapping_info;
+       if (NULL == vma) {
                kfree(vma_usage_tracker);
                return _MALI_OSK_ERR_FAULT;
        }
@@ -152,16 +152,16 @@ _mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descrip
 #endif
 
 
-       if (0==descriptor->is_cached) {
+       if (0 == descriptor->is_cached) {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }
-       DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+       DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));
 
        /* Setup the functions which handle further VMA handling */
        vma->vm_ops = &ump_vm_ops;
 
        /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
-       descriptor->mapping = (void __user*)vma->vm_start;
+       descriptor->mapping = (void __user *)vma->vm_start;
 
        atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
        vma_usage_tracker->descriptor = descriptor;
@@ -169,16 +169,16 @@ _mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descrip
        return _MALI_OSK_ERR_OK;
 }
 
-void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor)
 {
-       struct vm_area_struct * vma;
-       ump_vma_usage_tracker * vma_usage_tracker;
+       struct vm_area_struct *vma;
+       ump_vma_usage_tracker *vma_usage_tracker;
 
        if (NULL == descriptor) return;
 
        /* Linux does the right thing as part of munmap to remove the mapping
         * All that remains is that we remove the vma_usage_tracker setup in init() */
-       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+       vma = (struct vm_area_struct *)descriptor->process_mapping_info;
 
        vma_usage_tracker = vma->vm_private_data;
 
@@ -187,26 +187,26 @@ void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
        return;
 }
 
-_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size)
 {
        struct vm_area_struct *vma;
        _mali_osk_errcode_t retval;
 
        if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
 
-       vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+       vma = (struct vm_area_struct *)descriptor->process_mapping_info;
 
-       if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+       if (NULL == vma) return _MALI_OSK_ERR_FAULT;
 
-       retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;;
+       retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
 
        DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
-                   ump_dd_secure_id_get(descriptor->handle),
-                   (unsigned long)vma,
-                   (unsigned long)(vma->vm_start + offset),
-                   (unsigned long)*phys_addr,
-                   size,
-                   (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+                   ump_dd_secure_id_get(descriptor->handle),
+                   (unsigned long)vma,
+                   (unsigned long)(vma->vm_start + offset),
+                   (unsigned long)*phys_addr,
+                   size,
+                   (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
 
        return retval;
 }
@@ -217,18 +217,18 @@ static void level1_cache_flush_all(void)
        __cpuc_flush_kern_all();
 }
 
-void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data)
 {
        int i;
 
        /* Flush L1 using virtual address, the entire range in one go.
         * Only flush if user space process has a valid write mapping on given address. */
-       if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) ) {
+       if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
                __cpuc_flush_dcache_area(virt, size);
                DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size));
        } else {
                if (session_data) {
-                       if (op == _UMP_UK_MSYNC_FLUSH_L1  ) {
+                       if (op == _UMP_UK_MSYNC_FLUSH_L1) {
                                DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
                                session_data->has_pending_level1_cache_flush = 0;
                                level1_cache_flush_all();
@@ -236,7 +236,7 @@ void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk
                        } else {
                                if (session_data->cache_operations_ongoing) {
                                        session_data->has_pending_level1_cache_flush++;
-                                       DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
+                                       DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
                                } else {
                                        /* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
                                        level1_cache_flush_all();
@@ -248,49 +248,49 @@ void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk
                }
        }
 
-       if ( NULL == mem ) return;
+       if (NULL == mem) return;
 
-       if ( mem->size_bytes==size) {
-               DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id));
+       if (mem->size_bytes == size) {
+               DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
        } else {
                DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
-                           mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
+                           mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
        }
 
 
        /* Flush L2 using physical addresses, block for block. */
-       for (i=0 ; i < mem->nr_blocks; i++) {
+       for (i = 0 ; i < mem->nr_blocks; i++) {
                u32 start_p, end_p;
                ump_dd_physical_block *block;
                block = &mem->block_array[i];
 
-               if(offset >= block->size) {
+               if (offset >= block->size) {
                        offset -= block->size;
                        continue;
                }
 
-               if(offset) {
+               if (offset) {
                        start_p = (u32)block->addr + offset;
                        /* We'll zero the offset later, after using it to calculate end_p. */
                } else {
                        start_p = (u32)block->addr;
                }
 
-               if(size < block->size - offset) {
-                       end_p = start_p + size - 1;
+               if (size < block->size - offset) {
+                       end_p = start_p + size;
                        size = 0;
                } else {
-                       if(offset) {
-                               end_p = start_p + (block->size - offset - 1);
+                       if (offset) {
+                               end_p = start_p + (block->size - offset);
                                size -= block->size - offset;
                                offset = 0;
                        } else {
-                               end_p = start_p + block->size - 1;
+                               end_p = start_p + block->size;
                                size -= block->size;
                        }
                }
 
-               switch(op) {
+               switch (op) {
                case _UMP_UK_MSYNC_CLEAN:
                        outer_clean_range(start_p, end_p);
                        break;
@@ -304,7 +304,7 @@ void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk
                        break;
                }
 
-               if(0 == size) {
+               if (0 == size) {
                        /* Nothing left to flush. */
                        break;
                }
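
Besides the whitespace cleanup, this hunk fixes the range arithmetic: the outer-cache range operations treat the end address as exclusive, so the "- 1" in each end_p computation is dropped. The surrounding loop walks a byte window (offset, size) across the scattered physical blocks; a standalone sketch of that walk, where apply_range() and the block layout are illustrative stand-ins for outer_clean_range()/outer_flush_range() and ump_dd_physical_block:

    #include <stdio.h>

    struct phys_block { unsigned long addr; unsigned long size; };

    static void apply_range(unsigned long start, unsigned long end)
    {
            /* Stand-in for the outer-cache ops; 'end' is exclusive,
             * as in the fixed driver code. */
            printf("op on [0x%lx, 0x%lx)\n", start, end);
    }

    static void walk_blocks(struct phys_block *blocks, int nr_blocks,
                            unsigned long offset, unsigned long size)
    {
            int i;

            for (i = 0; i < nr_blocks && size; i++) {
                    struct phys_block *b = &blocks[i];
                    unsigned long start, len;

                    if (offset >= b->size) {     /* window starts past block */
                            offset -= b->size;
                            continue;
                    }
                    start = b->addr + offset;
                    len = b->size - offset;      /* bytes left in this block */
                    if (len > size)
                            len = size;
                    apply_range(start, start + len);  /* exclusive end */
                    size -= len;
                    offset = 0;
            }
    }

    int main(void)
    {
            struct phys_block blocks[] = { { 0x1000, 0x1000 }, { 0x8000, 0x1000 } };

            walk_blocks(blocks, 2, 0x800, 0x1000);  /* crosses both blocks */
            return 0;
    }
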
index 6ed25d3447e28022cd3bcc8e395ef9dba65bb061..906ed22cf5327b537e3c082b6ae3994bbae764dd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -20,7 +20,7 @@
 #include "ump_kernel_linux.h"
 
 /* is called from ump_kernel_constructor in common code */
-_mali_osk_errcode_t _ump_osk_init( void )
+_mali_osk_errcode_t _ump_osk_init(void)
 {
        if (0 != ump_kernel_device_initialize()) {
                return _MALI_OSK_ERR_FAULT;
@@ -29,7 +29,7 @@ _mali_osk_errcode_t _ump_osk_init( void )
        return _MALI_OSK_ERR_OK;
 }
 
-_mali_osk_errcode_t _ump_osk_term( void )
+_mali_osk_errcode_t _ump_osk_term(void)
 {
        ump_kernel_device_terminate();
        return _MALI_OSK_ERR_OK;
index f2b084efaf426aee2f1521d1c65dcc199dcd8178..a2c8e69b0bebc95e98721f3d047d22423bfd2a86 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -24,7 +24,7 @@
 /*
  * IOCTL operation; Allocate UMP memory
  */
-int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_allocate_s user_interaction;
        _mali_osk_errcode_t err;
@@ -43,8 +43,8 @@ int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * sessi
 
        user_interaction.ctx = (void *) session_data;
 
-       err = _ump_ukk_allocate( &user_interaction );
-       if( _MALI_OSK_ERR_OK != err ) {
+       err = _ump_ukk_allocate(&user_interaction);
+       if (_MALI_OSK_ERR_OK != err) {
                DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
                return map_errcode(err);
        }
@@ -59,8 +59,8 @@ int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * sessi
                release_args.ctx = (void *) session_data;
                release_args.secure_id = user_interaction.secure_id;
 
-               err = _ump_ukk_release( &release_args );
-               if(_MALI_OSK_ERR_OK != err) {
+               err = _ump_ukk_release(&release_args);
+               if (_MALI_OSK_ERR_OK != err) {
                        MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
                }
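
Each wrapper in this file follows the same shape: validate the user pointer, copy the argument struct in, call the common-code entry point, and copy the (possibly updated) struct back out. ump_allocate_wrapper additionally rolls the allocation back when the copy-out fails, so a secure ID can never leak to a caller that failed to receive it. A condensed, self-contained sketch of that copy-in/call/copy-out-with-rollback shape; the types and helper functions are illustrative stand-ins, not the driver's:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    struct alloc_args { unsigned int size; unsigned int secure_id; };

    /* Illustrative stand-ins for the kernel/user boundary and common code. */
    static int fake_copy(void *dst, const void *src, size_t n, int should_fail)
    {
            if (should_fail)
                    return -1;  /* models copy_to_user() failing */
            memcpy(dst, src, n);
            return 0;
    }
    static int core_allocate(struct alloc_args *a) { a->secure_id = 42; return 0; }
    static void core_release(unsigned int id) { printf("rolled back id %u\n", id); }

    static int allocate_wrapper(struct alloc_args *user_arg, int copy_out_fails)
    {
            struct alloc_args karg;

            if (fake_copy(&karg, user_arg, sizeof(karg), 0))
                    return -EFAULT;              /* copy-in failed */

            if (core_allocate(&karg))
                    return -ENOMEM;              /* common code failed */

            if (fake_copy(user_arg, &karg, sizeof(karg), copy_out_fails)) {
                    /* User space never learned the secure ID: undo the
                     * allocation, as the wrapper above does. */
                    core_release(karg.secure_id);
                    return -EFAULT;
            }
            return 0;
    }

    int main(void)
    {
            struct alloc_args a = { 4096, 0 };

            allocate_wrapper(&a, 1);  /* force the rollback path */
            return 0;
    }
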
 
index ba028f2668ff96b2b9d75c0a57f41d2afbb9b971..7e099c01fb197b217e92aa80ae1bdddea1f8d0e4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2013-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -24,7 +24,7 @@ extern "C" {
 #endif
 
 
-int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
 
 
 #ifdef __cplusplus
index 64a62f69705fc356c83caa9ed7cdc6e14c5bfe55..bef4056c2045eb7422cbb189c0bec74e0635f5e9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -23,7 +23,7 @@
 /*
  * IOCTL operation; Negotiate version of IOCTL API
  */
-int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data)
 {
        _ump_uk_api_version_s version_info;
        _mali_osk_errcode_t err;
@@ -40,9 +40,9 @@ int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data *
                return -EFAULT;
        }
 
-       version_info.ctx = (void*) session_data;
-       err = _ump_uku_get_api_version( &version_info );
-       if( _MALI_OSK_ERR_OK != err ) {
+       version_info.ctx = (void *) session_data;
+       err = _ump_uku_get_api_version(&version_info);
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
                return map_errcode(err);
        }
@@ -62,7 +62,7 @@ int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data *
 /*
  * IOCTL operation; Release reference to specified UMP memory.
  */
-int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_release_s release_args;
        _mali_osk_errcode_t err;
@@ -79,9 +79,9 @@ int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * sessio
                return -EFAULT;
        }
 
-       release_args.ctx = (void*) session_data;
-       err = _ump_ukk_release( &release_args );
-       if( _MALI_OSK_ERR_OK != err ) {
+       release_args.ctx = (void *) session_data;
+       err = _ump_ukk_release(&release_args);
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
                return map_errcode(err);
        }
@@ -93,7 +93,7 @@ int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * sessio
 /*
  * IOCTL operation; Return size for specified UMP memory.
  */
-int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_size_get_s user_interaction;
        _mali_osk_errcode_t err;
@@ -110,8 +110,8 @@ int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * sessi
        }
 
        user_interaction.ctx = (void *) session_data;
-       err = _ump_ukk_size_get( &user_interaction );
-       if( _MALI_OSK_ERR_OK != err ) {
+       err = _ump_ukk_size_get(&user_interaction);
+       if (_MALI_OSK_ERR_OK != err) {
                MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
                return map_errcode(err);
        }
@@ -129,7 +129,7 @@ int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * sessi
 /*
  * IOCTL operation; Do cache maintenance on specified UMP memory.
  */
-int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_msync_s user_interaction;
 
@@ -146,7 +146,7 @@ int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_
 
        user_interaction.ctx = (void *) session_data;
 
-       _ump_ukk_msync( &user_interaction );
+       _ump_ukk_msync(&user_interaction);
 
        user_interaction.ctx = NULL;
 
@@ -157,7 +157,7 @@ int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_
 
        return 0; /* success */
 }
-int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_cache_operations_control_s user_interaction;
 
@@ -174,7 +174,7 @@ int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_sessi
 
        user_interaction.ctx = (void *) session_data;
 
-       _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s*) &user_interaction );
+       _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s *) &user_interaction);
 
        user_interaction.ctx = NULL;
 
@@ -187,7 +187,7 @@ int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_sessi
        return 0; /* success */
 }
 
-int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_switch_hw_usage_s user_interaction;
 
@@ -204,7 +204,7 @@ int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data
 
        user_interaction.ctx = (void *) session_data;
 
-       _ump_ukk_switch_hw_usage( &user_interaction );
+       _ump_ukk_switch_hw_usage(&user_interaction);
 
        user_interaction.ctx = NULL;
 
@@ -217,7 +217,7 @@ int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data
        return 0; /* success */
 }
 
-int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_lock_s user_interaction;
 
@@ -234,7 +234,7 @@ int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_d
 
        user_interaction.ctx = (void *) session_data;
 
-       _ump_ukk_lock( &user_interaction );
+       _ump_ukk_lock(&user_interaction);
 
        user_interaction.ctx = NULL;
 
@@ -248,7 +248,7 @@ int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_d
        return 0; /* success */
 }
 
-int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data   *session_data)
 {
        _ump_uk_unlock_s user_interaction;
 
@@ -265,7 +265,7 @@ int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data  * session
 
        user_interaction.ctx = (void *) session_data;
 
-       _ump_ukk_unlock( &user_interaction );
+       _ump_ukk_unlock(&user_interaction);
 
        user_interaction.ctx = NULL;
 
index 9b8c87df70b73cdd42743a4a0865ff657434dbc3..c3e44561f7c869fc710bc134b3159c101da966ee 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This confidential and proprietary software may be used only as
  * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2012-2013 ARM Limited
+ * (C) COPYRIGHT 2009-2010, 2012-2015 ARM Limited
  * ALL RIGHTS RESERVED
  * The entire notice above must be reproduced on all authorised
  * copies and copies may only be made to the extent permitted
@@ -25,14 +25,14 @@ extern "C" {
 
 
 
-int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_release_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_msync_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_lock_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
-int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data  * session_data);
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data   *session_data);
 
 
 
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/ump/readme.txt b/drivers/misc/mediatek/gpu/mt8127/mali/ump/readme.txt
deleted file mode 100755 (executable)
index c238cf0..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Building the UMP Device Driver for Linux
-----------------------------------------
-
-Build the UMP Device Driver for Linux by running the following make command:
-
-KDIR=<kdir_path> CONFIG=<your_config> BUILD=<build_option> make
-
-where
-    kdir_path: Path to your Linux Kernel directory
-    your_config: Name of the sub-folder to find the required config.h file
-                 ("arch-" will be prepended)
-    build_option: debug or release. Debug is default.
-
-The config.h contains following configuration parameters:
-
-ARCH_UMP_BACKEND_DEFAULT
-    0 specifies the dedicated memory allocator.
-    1 specifies the OS memory allocator.
-ARCH_UMP_MEMORY_ADDRESS_DEFAULT
-    This is only required for the dedicated memory allocator, and specifies
-    the physical start address of the memory block reserved for UMP.
-ARCH_UMP_MEMORY_SIZE_DEFAULT
-    This specified the size of the memory block reserved for UMP, or the
-    maximum limit for allocations from the OS.
-
-The result will be a ump.ko file, which can be loaded into the Linux kernel
-by using the insmod command. The driver can also be built as a part of the
-kernel itself.
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/umplock/Makefile b/drivers/misc/mediatek/gpu/mt8127/mali/umplock/Makefile
deleted file mode 100755 (executable)
index d1d5c4c..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-# Copyright (C) 2012 ARM Limited. All rights reserved.
-# 
-# This program is free software and is provided to you under the terms of the GNU General Public License version 2
-# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
-# 
-# A copy of the licence is included with the program, and can also be obtained from Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
-#
-
-# default to building for the host
-ARCH ?= $(shell uname -m)
-
-# linux build system integration
-
-ifneq ($(KERNELRELEASE),)
-# Inside the kernel build system
-
-EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)
-
-SRC =  umplock_driver.c
-
-MODULE:=umplock.ko
-
-obj-m := $(MODULE:.ko=.o)
-$(MODULE:.ko=-y) := $(SRC:.c=.o)
-
-$(MODULE:.ko=-objs) := $(SRC:.c=.o) 
-
-else
-# Outside the kernel build system
-#
-#
-
-# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
--include KDIR_CONFIGURATION
-
-# Define host system directory
-KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
-
-ifeq ($(ARCH), arm)
-       # when compiling for ARM we're cross compiling
-       export CROSS_COMPILE ?= arm-none-linux-gnueabi-
-       CONFIG ?= arm
-else
-       # Compiling for the host
-       CONFIG ?= $(shell uname -m)
-endif
-
-# default cpu to select
-CPU ?= $(shell uname -m)
-
-# look up KDIR based om CPU selection
-KDIR ?= $(KDIR-$(CPU))
-
-ifeq ($(KDIR),)
-$(error No KDIR found for platform $(CPU))
-endif
-
-all:
-       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)
-
-kernelrelease:
-       $(MAKE) -C $(KDIR) kernelrelease
-
-clean:
-       $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
-
-endif
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_driver.c b/drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_driver.c
deleted file mode 100644 (file)
index eaa4bd4..0000000
+++ /dev/null
@@ -1,671 +0,0 @@
-/*
- * Copyright (C) 2012-2013 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <asm/uaccess.h>
-#include "umplock_ioctl.h"
-#include <linux/sched.h>
-
-#define MAX_ITEMS 1024
-#define MAX_PIDS 128    
-
-typedef struct lock_cmd_priv
-{
-       uint32_t msg[128];    /*ioctl args*/     
-       u32 pid;                          /*process id*/
-}_lock_cmd_priv;
-
-typedef struct lock_ref
-{
-       int ref_count;
-       u32 pid;
-}_lock_ref;
-
-typedef struct umplock_item
-{
-       u32 secure_id;
-       /*u32 references;*/
-       _lock_access_usage usage;
-       _lock_ref references[MAX_PIDS]; 
-       struct semaphore item_lock;
-} umplock_item;
-
-typedef struct umplock_device_private
-{
-       struct mutex item_list_lock;
-       atomic_t sessions;
-       umplock_item items[MAX_ITEMS];
-       u32 pids[MAX_PIDS];
-} umplock_device_private;
-
-struct umplock_device
-{
-       struct cdev cdev;
-       struct class *umplock_class;
-};
-
-static char umplock_dev_name[] = "umplock";
-
-int umplock_major = 0;
-module_param(umplock_major, int, S_IRUGO); /* r--r--r-- */
-MODULE_PARM_DESC(umplock_major, "Device major number");
-
-static int  umplock_driver_open( struct inode *inode, struct file *filp );
-static int  umplock_driver_release( struct inode *inode, struct file *filp );
-static long umplock_driver_ioctl( struct file *f, unsigned int cmd, unsigned long arg );
-
-static struct file_operations umplock_fops =  
-{  
-       .owner   = THIS_MODULE,
-       .open    = umplock_driver_open,
-       .release = umplock_driver_release,
-       .unlocked_ioctl = umplock_driver_ioctl,
-};
-
-static struct umplock_device umplock_device;
-static umplock_device_private device;
-
-void umplock_init_locklist( void )
-{
-       memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
-       atomic_set(&device.sessions, 0);
-}
-
-void umplock_deinit_locklist( void )
-{
-       memset(&device.items, 0, sizeof(umplock_item)*MAX_ITEMS);
-}
-
-int umplock_device_initialize( void )
-{
-       int err;
-       dev_t dev = 0;
-
-       if ( 0 == umplock_major )
-       {
-               err = alloc_chrdev_region(&dev, 0, 1, umplock_dev_name);
-               umplock_major = MAJOR(dev);
-       }
-       else
-       {
-               dev = MKDEV(umplock_major, 0);
-               err = register_chrdev_region(dev, 1, umplock_dev_name);
-       }
-
-       if ( 0 == err )
-       {
-               memset(&umplock_device, 0, sizeof(umplock_device));
-               cdev_init(&umplock_device.cdev, &umplock_fops);
-               umplock_device.cdev.owner = THIS_MODULE;
-               umplock_device.cdev.ops = &umplock_fops;
-
-               err = cdev_add(&umplock_device.cdev, dev, 1);
-               if ( 0 == err )
-               {
-                       umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
-                       if ( IS_ERR(umplock_device.umplock_class ) )
-                       {
-                               err = PTR_ERR(umplock_device.umplock_class);
-                       }
-                       else
-                       {
-                               struct device *mdev;
-                               mdev = device_create(umplock_device.umplock_class, NULL, dev, NULL, umplock_dev_name);
-                               if ( !IS_ERR(mdev) ) 
-                               {
-                                       return 0; /* all ok */
-                               }
-       
-                               err = PTR_ERR(mdev);
-                               class_destroy(umplock_device.umplock_class);
-                       }
-                       cdev_del(&umplock_device.cdev);
-               }
-
-               unregister_chrdev_region(dev, 1);
-       }
-
-       return 1;
-}
-
-void umplock_device_terminate(void)
-{
-       dev_t dev = MKDEV(umplock_major, 0);
-
-       device_destroy(umplock_device.umplock_class, dev);
-       class_destroy(umplock_device.umplock_class);
-
-       cdev_del(&umplock_device.cdev);
-       unregister_chrdev_region(dev, 1);
-}
-
-int umplock_constructor(void)
-{
-       mutex_init(&device.item_list_lock);
-       if ( !umplock_device_initialize() ) return 1;
-       umplock_init_locklist();
-
-       return 0;
-}
-
-void umplock_destructor(void)
-{
-       umplock_deinit_locklist();
-       umplock_device_terminate();
-       mutex_destroy(&device.item_list_lock);
-}
-
-int umplock_find_item( u32 secure_id )
-{
-       int i;
-       for ( i=0; i<MAX_ITEMS; i++ )
-       {
-               if ( device.items[i].secure_id == secure_id ) return i;
-       }
-       
-       return -1;
-}
-
-int umplock_find_slot( void )
-{
-       int i;
-       for ( i=0; i<MAX_ITEMS; i++ )
-       {
-               if ( device.items[i].secure_id == 0 ) return i;
-       }
-
-       return -1;
-}
-
-static int umplock_find_item_by_pid( _lock_cmd_priv *lock_cmd, int *item_slot, int *ref_slot)
-{
-       _lock_item_s *lock_item;
-       int i,j;
-       
-       lock_item = (_lock_item_s *)&lock_cmd->msg;
-
-       i = umplock_find_item(lock_item->secure_id);
-
-       if ( i < 0)
-               return -1;
-       
-       for(j=0; j<MAX_PIDS; j++)
-       {
-               if(device.items[i].references[j].pid == lock_cmd->pid)
-               {
-                       *item_slot = i;
-                       *ref_slot = j;
-                       return 0;
-               }
-       }
-       return -1 ;
-}
-
-static int umplock_find_client_valid(u32 pid)
-{
-       int i;
-
-       if(pid == 0)
-               return -1;
-       
-       for(i=0; i<MAX_PIDS; i++)
-       {
-               if(device.pids[i] == pid) return i;
-       }
-
-       return -1;
-}
-
-static int do_umplock_create_locked( _lock_cmd_priv *lock_cmd)
-{
-       int i_index,ref_index;
-       int ret;
-       _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
-
-       i_index = ref_index = -1;
-
-       #if 0 
-       if ( lock_item->usage == 1 ) printk( KERN_DEBUG "UMPLOCK: C 0x%x GPU SURFACE\n", lock_item->secure_id );
-       else if ( lock_item->usage == 2 ) printk( KERN_DEBUG "UMPLOCK: C 0x%x GPU TEXTURE\n", lock_item->secure_id );
-       else printk( KERN_DEBUG "UMPLOCK: C 0x%x CPU\n", lock_item->secure_id );
-       #endif
-
-       ret = umplock_find_client_valid( lock_cmd->pid );       
-       if( ret < 0 )
-       {
-               /*lock request from an invalid client pid, do nothing*/
-               return 0;
-       }
-
-       ret = umplock_find_item_by_pid( lock_cmd, &i_index, &ref_index );
-       if ( ret >= 0 )
-       {
-               if (device.items[i_index].references[ref_index].ref_count == 0)
-                       device.items[i_index].references[ref_index].ref_count = 1;
-       }
-       else if ( (i_index = umplock_find_item( lock_item->secure_id)) >= 0 )
-       {
-               for ( ref_index = 0; ref_index < MAX_PIDS; ref_index++)
-               {
-                       if (device.items[i_index].references[ref_index].pid == 0) break;
-               }
-               if ( ref_index < MAX_PIDS )
-               {
-                       device.items[i_index].references[ref_index].pid = lock_cmd->pid;
-                       device.items[i_index].references[ref_index].ref_count = 1;
-               }
-               else
-               {
-                       printk( KERN_ERR "UMPLOCK: whoops, item ran out of available reference slot\n" );
-               }
-       }
-       else
-       {
-               i_index = umplock_find_slot();
-
-               if ( i_index >= 0 )
-               {
-                       device.items[i_index].secure_id = lock_item->secure_id;
-                       device.items[i_index].usage = lock_item->usage;
-                       device.items[i_index].references[0].pid = lock_cmd->pid;
-                       device.items[i_index].references[0].ref_count = 1;
-                       sema_init(&device.items[i_index].item_lock, 1);
-               }
-               else
-               {
-                       printk( KERN_ERR "UMPLOCK: whoops, ran out of available slots\n" );
-               }
-       }
-       
-       return 0;
-}
-/** IOCTLs **/
-
-static int do_umplock_create(_lock_cmd_priv *lock_cmd)
-{
-       int ret = 0;
-       mutex_lock(&device.item_list_lock);
-       ret = do_umplock_create_locked(lock_cmd);
-       mutex_unlock(&device.item_list_lock);
-       return ret;
-}
-
-static int do_umplock_process( _lock_cmd_priv *lock_cmd )
-{
-       int ret, i_index, ref_index, ref_count;
-       _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
-
-       mutex_lock(&device.item_list_lock);
-
-       do_umplock_create_locked(lock_cmd);
-       
-       ret = umplock_find_client_valid( lock_cmd->pid );       
-       if( ret < 0 )
-       {
-               /*lock request from an invalid client pid, do nothing*/
-               mutex_unlock(&device.item_list_lock);
-               return 0;
-       }
-       
-       ret = umplock_find_item_by_pid( lock_cmd, &i_index, &ref_index );
-       ref_count = device.items[i_index].references[ref_index].ref_count;
-       if ( ret >= 0 )
-       {       
-               if (ref_count == 1)
-               {
-                       /*add ref before down to wait for the umplock*/
-                       device.items[i_index].references[ref_index].ref_count++;
-                       mutex_unlock(&device.item_list_lock);
-                       if ( down_interruptible(&device.items[i_index].item_lock) )
-                       {
-                               /*wait up without hold the umplock. restore previous state and return*/
-                               mutex_lock(&device.item_list_lock);
-                               device.items[i_index].references[ref_index].ref_count--;
-                               mutex_unlock(&device.item_list_lock);
-                               return -ERESTARTSYS;
-                       }
-                       mutex_lock(&device.item_list_lock);
-               }
-               else
-               {
-                       /*already got the umplock, add ref*/
-                       device.items[i_index].references[ref_index].ref_count++;
-               }
-               #if 0
-               if ( lock_item->usage == 1 ) printk( KERN_DEBUG "UMPLOCK:  P 0x%x GPU SURFACE\n", lock_item->secure_id );
-               else if ( lock_item->usage == 2 ) printk( KERN_DEBUG "UMPLOCK:  P 0x%x GPU TEXTURE\n", lock_item->secure_id );
-               else printk( KERN_DEBUG "UMPLOCK:  P 0x%x CPU\n", lock_item->secure_id );
-               #endif
-       }
-       else
-       {
-               /*fail to find a item*/
-               printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_PROCESS called with invalid parameter\n");
-               mutex_unlock(&device.item_list_lock);
-               return -EINVAL;
-       }
-       mutex_unlock(&device.item_list_lock);
-       return 0;
-}
-
-static int do_umplock_release( _lock_cmd_priv *lock_cmd )
-{
-       int i_index,ref_index, ref_count;
-       int ret;
-       _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
-
-       mutex_lock(&device.item_list_lock);
-       ret = umplock_find_client_valid( lock_cmd->pid );       
-       if( ret < 0 )
-       {
-               /*lock request from an invalid client pid, do nothing*/
-               mutex_unlock(&device.item_list_lock);
-               return 0;
-       }
-       
-       i_index = ref_index = -1;
-
-       ret = umplock_find_item_by_pid( lock_cmd, &i_index, &ref_index );
-
-       if ( ret >= 0 )
-       {
-               device.items[i_index].references[ref_index].ref_count--;
-               ref_count = device.items[i_index].references[ref_index].ref_count;
-
-               #if 0
-               if ( lock_item->usage == 1 ) printk( KERN_DEBUG "UMPLOCK:   R 0x%x GPU SURFACE\n", lock_item->secure_id );
-               else if ( lock_item->usage == 2 ) printk( KERN_DEBUG "UMPLOCK:   R 0x%x GPU TEXTURE\n", lock_item->secure_id );
-               else printk( KERN_DEBUG "UMPLOCK:   R 0x%x CPU\n", lock_item->secure_id );
-               #endif
-               /*reached the last reference to the umplock*/
-               if ( ref_count == 1 )
-               {
-                       /*release the umplock*/
-                       up( &device.items[i_index].item_lock );
-               
-                       device.items[i_index].references[ref_index].ref_count = 0;
-                       device.items[i_index].references[ref_index].pid = 0;
-               }
-       }
-       else
-       {
-               /*fail to find item*/
-               printk(KERN_ERR "UMPLOCK: IOCTL_UMPLOCK_RELEASE called with invalid parameter\n");
-               mutex_unlock(&device.item_list_lock);
-               return -EINVAL;
-       }
-       mutex_unlock(&device.item_list_lock);
-       return 0;
-}
-
-static int do_umplock_zap( void )
-{
-       int i;
-
-       printk( KERN_DEBUG "UMPLOCK: ZAP ALL ENTRIES!\n" );
-
-       mutex_lock(&device.item_list_lock);
-       
-       for ( i=0; i<MAX_ITEMS; i++ )
-       {
-               device.items[i].secure_id = 0;
-               memset(&device.items[i].references, 0, sizeof(_lock_ref)*MAX_PIDS);
-               sema_init(&device.items[i].item_lock, 1);
-       }
-       mutex_unlock(&device.item_list_lock);
-
-       for ( i=0; i<MAX_PIDS; i++)
-       {
-               device.pids[i] = 0;
-       }
-       return 0;
-}
-
-static int do_umplock_dump( void )
-{
-       int i, j;
-
-       printk("dump all the items\n");
-
-       mutex_lock(&device.item_list_lock);
-       for (i = 0; i < MAX_ITEMS; i++)
-       {
-               for (j = 0; j < MAX_PIDS; j++)
-               {
-                       if (device.items[i].secure_id != 0 && device.items[i].references[j].pid != 0)
-                       {
-                               printk("item[%d]->secure_id=%d\t reference[%d].ref_count=%d.pid=%d\n",
-                                       i,
-                                       device.items[i].secure_id,
-                                       j,
-                                       device.items[i].references[j].ref_count,
-                                       device.items[i].references[j].pid);
-                       }
-               }
-       }
-       mutex_unlock(&device.item_list_lock);
-
-       return 0;
-}
-
-int do_umplock_client_add (_lock_cmd_priv *lock_cmd )
-{
-       int i;
-       mutex_lock(&device.item_list_lock);
-       for ( i= 0; i<MAX_PIDS; i++)
-       {
-               if(device.pids[i] == lock_cmd->pid)
-               {
-                       return 0;
-               }
-       }
-       for ( i=0; i<MAX_PIDS; i++)
-       {
-               if(device.pids[i]==0)
-               {
-                       device.pids[i] = lock_cmd->pid;
-                       break;
-               }
-       }
-       mutex_unlock(&device.item_list_lock);
-       if( i==MAX_PIDS)
-       {
-               printk(KERN_ERR "Oops, Run out of cient slots\n ");
-       }
-       return 0;
-}
-
-int do_umplock_client_delete (_lock_cmd_priv *lock_cmd )
-{
-       int p_index=-1, i_index=-1,ref_index=-1;
-       int ret;
-       _lock_item_s *lock_item;
-       lock_item = (_lock_item_s *)&lock_cmd->msg;
-       
-       mutex_lock(&device.item_list_lock);
-       p_index = umplock_find_client_valid( lock_cmd->pid );
-       /*lock item pid is not valid.*/
-       if ( p_index<0 )
-       {
-               mutex_unlock(&device.item_list_lock);
-               return 0;
-       }
-
-       /*walk through umplock item list and release reference attached to this client*/
-       for(i_index = 0; i_index< MAX_ITEMS; i_index++ )
-       {
-               lock_item->secure_id = device.items[i_index].secure_id;
-               /*find the item index and reference slot for the lock_item*/
-               ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
-               
-               if(ret < 0)
-               {
-                       /*client has no reference on this umplock item, skip*/
-                       continue;
-               }
-               while(device.items[i_index].references[ref_index].ref_count)
-               {
-                       /*release references on this client*/
-                       mutex_unlock(&device.item_list_lock);
-                       do_umplock_release(lock_cmd);
-                       mutex_lock(&device.item_list_lock);
-               }
-       }
-       
-       /*remove the pid from umplock valid pid list*/
-       device.pids[p_index] = 0;
-       mutex_unlock(&device.item_list_lock);
-
-       return 0;
-}
-
-static long umplock_driver_ioctl( struct file *f, unsigned int cmd, unsigned long arg )
-{
-       int ret;
-       uint32_t size = _IOC_SIZE(cmd);
-       _lock_cmd_priv lock_cmd ;
-
-       if (_IOC_TYPE(cmd) != LOCK_IOCTL_GROUP )
-       {
-               return -ENOTTY;
-       }
-
-       if (_IOC_NR(cmd) >= LOCK_IOCTL_MAX_CMDS )
-       {
-               return -ENOTTY;
-       }
-
-       switch ( cmd )
-       {
-               case LOCK_IOCTL_CREATE:
-                       if (size != sizeof(_lock_item_s)) 
-                       {
-                               return -ENOTTY;
-                       }
-
-                       if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size))
-                       {
-                               return -EFAULT;
-                       }
-                       lock_cmd.pid = (u32)current->tgid;              
-                       ret = do_umplock_create(&lock_cmd);
-                       if (ret)
-                       {
-                               return ret;
-                       }
-                       return 0;
-
-               case LOCK_IOCTL_PROCESS:
-                       if (size != sizeof(_lock_item_s)) 
-                       {
-                               return -ENOTTY;
-                       }
-
-                       if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size))
-                       {
-                               return -EFAULT;
-                       }
-                       lock_cmd.pid = (u32)current->tgid;
-                       return do_umplock_process(&lock_cmd);
-
-               case LOCK_IOCTL_RELEASE:
-                       if (size != sizeof(_lock_item_s)) 
-                       {
-                               return -ENOTTY;
-                       }
-
-                       if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size))
-                       {
-                               return -EFAULT;
-                       }
-                       lock_cmd.pid = (u32)current->tgid;
-                       ret = do_umplock_release( &lock_cmd );
-                       if (ret)
-                       {
-                               return ret;
-                       }
-                       return 0;
-
-               case LOCK_IOCTL_ZAP:
-                       do_umplock_zap();
-                       return 0;
-
-               case LOCK_IOCTL_DUMP:
-                       do_umplock_dump();
-                       return 0;
-       }
-
-       return -ENOIOCTLCMD;
-}
-
-static int umplock_driver_open( struct inode *inode, struct file *filp )
-{
-       _lock_cmd_priv lock_cmd;
-       
-       atomic_inc(&device.sessions); 
-       printk( KERN_DEBUG "UMPLOCK: OPEN SESSION (%i references)\n", atomic_read(&device.sessions) );
-       
-       lock_cmd.pid = (u32)current->tgid;
-       do_umplock_client_add(&lock_cmd);
-       
-       return 0;
-}
-
-static int umplock_driver_release( struct inode *inode, struct file *filp )
-{
-       _lock_cmd_priv lock_cmd;
-       
-       lock_cmd.pid = (u32)current->tgid;
-       do_umplock_client_delete(&lock_cmd);
-       
-       atomic_dec(&device.sessions); 
-       printk( KERN_DEBUG "UMPLOCK: CLOSE SESSION (%i references)\n", atomic_read(&device.sessions) );
-       if ( atomic_read(&device.sessions) == 0 )
-       {
-               do_umplock_zap();
-       }
-
-       return 0;
-}
-
-static int __init umplock_initialize_module( void )
-{
-       printk( KERN_DEBUG "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__ );
-
-       if ( !umplock_constructor() )
-       {
-               printk( KERN_ERR "UMP lock device driver init failed\n");
-               return -ENOTTY;
-       }
-
-       printk( KERN_DEBUG "UMP lock device driver loaded\n" );
-
-       return 0;
-}
-
-static void __exit umplock_cleanup_module( void )
-{
-       printk( KERN_DEBUG "unloading UMP lock module\n" );
-       umplock_destructor();
-       printk( KERN_DEBUG "UMP lock module unloaded\n" );
-}
-
-module_init(umplock_initialize_module);
-module_exit(umplock_cleanup_module);
-
-
-MODULE_LICENSE("GPL");  
-MODULE_AUTHOR("ARM Ltd.");  
-MODULE_DESCRIPTION("ARM UMP locker"); 
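
The deleted driver's umplock_device_initialize() walks the classic character-device registration ladder: chrdev region, cdev, class, device node, unwinding each step on failure. A compilable sketch of the same ladder using goto-based unwind, which is the more common kernel idiom than the driver's nested ifs; all names are illustrative, and class_create() is shown in the two-argument form used by kernels of this era (the THIS_MODULE argument was dropped in Linux 6.4):

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/cdev.h>
    #include <linux/device.h>

    static dev_t demo_dev;
    static struct cdev demo_cdev;
    static struct class *demo_class;

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
    };

    static int __init demo_init(void)
    {
            struct device *mdev;
            int err;

            err = alloc_chrdev_region(&demo_dev, 0, 1, "demo");
            if (err)
                    return err;

            cdev_init(&demo_cdev, &demo_fops);
            demo_cdev.owner = THIS_MODULE;
            err = cdev_add(&demo_cdev, demo_dev, 1);
            if (err)
                    goto out_region;

            demo_class = class_create(THIS_MODULE, "demo");
            if (IS_ERR(demo_class)) {
                    err = PTR_ERR(demo_class);
                    goto out_cdev;
            }

            mdev = device_create(demo_class, NULL, demo_dev, NULL, "demo");
            if (IS_ERR(mdev)) {
                    err = PTR_ERR(mdev);
                    goto out_class;
            }
            return 0;

    out_class:
            class_destroy(demo_class);
    out_cdev:
            cdev_del(&demo_cdev);
    out_region:
            unregister_chrdev_region(demo_dev, 1);
            return err;
    }

    static void __exit demo_exit(void)
    {
            device_destroy(demo_class, demo_dev);
            class_destroy(demo_class);
            cdev_del(&demo_cdev);
            unregister_chrdev_region(demo_dev, 1);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
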
diff --git a/drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_ioctl.h b/drivers/misc/mediatek/gpu/mt8127/mali/umplock/umplock_ioctl.h
deleted file mode 100644 (file)
index 81d59ca..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Limited. All rights reserved.
- * 
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- * 
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef __UMPLOCK_IOCTL_H__
-#define __UMPLOCK_IOCTL_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#ifndef __user
-#define __user
-#endif
-
-
-/**
- * @file umplock_ioctl.h
- * This file describes the interface needed to use the Linux device driver.
- * The interface is used by the userpace Mali DDK.
- */
-
-typedef enum
-{
-       _LOCK_ACCESS_RENDERABLE = 1,
-       _LOCK_ACCESS_TEXTURE,
-       _LOCK_ACCESS_CPU_WRITE,
-       _LOCK_ACCESS_CPU_READ,
-} _lock_access_usage;
-
-typedef struct _lock_item_s
-{
-       unsigned int secure_id;
-       _lock_access_usage usage;
-} _lock_item_s;
-
-
-#define LOCK_IOCTL_GROUP 0x91
-
-#define _LOCK_IOCTL_CREATE_CMD  0   /* create kernel lock item        */
-#define _LOCK_IOCTL_PROCESS_CMD 1   /* process kernel lock item       */
-#define _LOCK_IOCTL_RELEASE_CMD 2   /* release kernel lock item       */
-#define _LOCK_IOCTL_ZAP_CMD     3   /* clean up all kernel lock items */
-#define _LOCK_IOCTL_DUMP_CMD    4   /* dump all the items */
-
-#define LOCK_IOCTL_MAX_CMDS     5
-
-#define LOCK_IOCTL_CREATE  _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_CREATE_CMD,  _lock_item_s )
-#define LOCK_IOCTL_PROCESS _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_PROCESS_CMD, _lock_item_s )
-#define LOCK_IOCTL_RELEASE _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_RELEASE_CMD, _lock_item_s )
-#define LOCK_IOCTL_ZAP     _IO ( LOCK_IOCTL_GROUP, _LOCK_IOCTL_ZAP_CMD )
-#define LOCK_IOCTL_DUMP    _IO ( LOCK_IOCTL_GROUP, _LOCK_IOCTL_DUMP_CMD )
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __UMPLOCK_IOCTL_H__ */
-
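Note: the header above was the whole userspace contract for the umplock driver. As a rough sketch of how a client such as the Mali DDK might have exercised it (the /dev/umplock node name, the secure_id value, and the error handling are assumptions, not taken from this tree):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "umplock_ioctl.h"

	int main(void)
	{
		_lock_item_s item = {
			.secure_id = 1,                      /* hypothetical UMP buffer ID */
			.usage     = _LOCK_ACCESS_CPU_WRITE,
		};
		int fd = open("/dev/umplock", O_RDWR);   /* node name assumed */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, LOCK_IOCTL_CREATE, &item) < 0)   /* register the lock item */
			perror("LOCK_IOCTL_CREATE");
		if (ioctl(fd, LOCK_IOCTL_PROCESS, &item) < 0)  /* acquire it for CPU write */
			perror("LOCK_IOCTL_PROCESS");
		/* ... access the buffer ... */
		if (ioctl(fd, LOCK_IOCTL_RELEASE, &item) < 0)  /* drop the reference */
			perror("LOCK_IOCTL_RELEASE");
		close(fd);
		return 0;
	}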
index 752329bcb5d11adc81ce0b4bdb62b0e14a9b499a..763c08b03c5e161e990fb180f0db2070c30e2c9c 100644 (file)
@@ -1,11 +1,11 @@
+
 #include "mtk_mali_kernel.h"
 #include "mali_kernel_common.h" /*for mali printf*/
-#include <mach/mt_clkmgr.h>     /*For MFG sub-system clock control API*/
-#include <linux/earlysuspend.h> /*For early suspend*/
-#include <mach/mt_clkmgr.h>
+/*#include <linux/earlysuspend.h>*/ /*For early suspend*/
 
 void MTKMALI_DumpRegister( void )
 {
+#if 0
 #define DUMP_REG_INFO( addr )   MALIK_MSG("REG: %s = 0x%08x\n", #addr, M_READ32( addr, 0 ))
     unsigned long dummy;
 
@@ -26,10 +26,10 @@ void MTKMALI_DumpRegister( void )
     DUMP_REG_INFO( REG_MFG_DEBUG_SEL );
 
     MALIK_MSG("---------------------------:\n"); 
+#endif // 0  
 
 
    /*Dump Call stack*/
     dump_stack();
-   
 }
 
index d481e553b00c195f8ea1343f5774292fbbf13f41..09f51b0ef488489cedba879d281325a6f4074983 100644 (file)
@@ -1,7 +1,8 @@
+
 #ifndef __MTK_MALI_KERNEL_H__
 #define __MTK_MALI_KERNEL_H__
 
-#include "mt_reg_base.h"
+/*#include "mt_reg_base.h"*/
 #include <asm/io.h> /*For ioread/iowrite*/
 
 
@@ -29,7 +30,7 @@
 /*-----------------------------------------------------------------------------
     Print Macro
   -----------------------------------------------------------------------------*/
-#define MALIK_PRINTF             printk
+#define MALIK_PRINTF             pr_warn
 #define MALIK_MSG(fmt, arg...)   { MALIK_PRINTF("Mali: %s(): "fmt,__FUNCTION__, ##arg); }
 #define MALIK_ERRMSG(fmt, arg...)   { MALIK_PRINTF("Mali Error: %s()@%s:line %d: "fmt,__FUNCTION__, __FILE__,__LINE__, ##arg);  MTKMALI_DumpRegister(); }
 #define MALIK_WARNMSG(fmt, arg...)  { MALIK_PRINTF("Mali Warn: %s(): "fmt,__FUNCTION__, ##arg); }
diff --git a/drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.c b/drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.c
deleted file mode 100644 (file)
index fdc5f89..0000000
+++ /dev/null
@@ -1,334 +0,0 @@
-#include <linux/kallsyms.h>
-#include <linux/cache.h>
-#include <linux/ftrace_event.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include "mtk_mali_trace.h"
-
-static struct task_struct           *gTraceThread[MTK_MALI_MAX_CORE_COUNT];  // Trace task
-static wait_queue_head_t            gTraceQueue[MTK_MALI_MAX_CORE_COUNT];    // Trace queue
-static mtk_mali_trace_work          gTraceEvent[MTK_MALI_MAX_CORE_COUNT];    // Trace event
-static spinlock_t                   gThreadLock[MTK_MALI_MAX_CORE_COUNT];    // Trace lock
-
-// Mark address
-static unsigned long __read_mostly  gMarkWriteAddr = 0;
-
-
-static inline void mali_update_write_addr(void)
-{
-#if 0
-    if(unlikely(0 == gMarkWriteAddr))
-    {
-        gMarkWriteAddr = kallsyms_lookup_name("tracing_mark_write");
-    }
-#endif // 0
-}
-
-
-static void mali_kernel_trace_begin(char *pName)
-{
-#if 0
-    mali_update_write_addr();
-
-    event_trace_printk(gMarkWriteAddr,
-                       "B|%d|%s\n",
-                       current->tgid, pName);
-#endif // 0
-}
-
-
-static void mali_kernel_trace_counter(char *pName, int count)
-{
-#if 0
-    mali_update_write_addr();
-
-    event_trace_printk(gMarkWriteAddr,
-                       "C|%d|%s|%d\n",
-                       (in_interrupt()? -1: current->tgid),
-                       pName,
-                       count);
-#endif // 0
-}
-
-
-static void mali_kernel_trace_end(void)
-{
-#if 0
-    mali_update_write_addr();
-
-    event_trace_printk(gMarkWriteAddr,
-                       "E\n"); 
-#endif // 0
-}
-
-
-static inline int mali_core_index_to_event_id(MALI_CORE_ENUM coreType,
-                                              int            index,
-                                              int            traceType)
-{
-    int eventID;
-
-    eventID = 0;
-
-    switch(coreType)
-    {
-        case MALI_CORE_TYPE_GP:
-            eventID = ((1 == traceType)? 0: 1);
-            break;
-        case MALI_CORE_TYPE_PP:
-            if (0 == index)
-            {
-                eventID = ((1 == traceType)? 2: 3);
-            }
-            else
-            {
-                eventID = ((1 == traceType)? 4: 5);
-            }
-            break;
-        default:
-            // assert(0);
-            break;
-    }
-
-    return eventID;
-}
-
-
-static inline int mali_core_index_to_core_id(MALI_CORE_ENUM coreType,
-                                             int            index)
-{
-    int coreID;
-
-    coreID = 0;
-
-    switch(coreType)
-    {
-        case MALI_CORE_TYPE_GP:
-            coreID = 0;
-            break;
-        case MALI_CORE_TYPE_PP:
-            coreID = ((0 == index)? 1: 2);
-            break;
-        default:
-            // assert(0);
-            break;
-    }
-
-    return coreID;
-}
-
-
-static inline const char *mali_core_id_to_core_name(int coreID)
-{
-    static const char *pName[] =
-    {
-        "MALI_GP0",
-        "MALI_PP0",
-        "MALI_PP1"
-    };
-    
-    switch(coreID)
-    {
-        case 0:
-        case 1:
-        case 2:
-            return pName[coreID];
-        default:
-            // assert(0);
-            break;
-    }
-
-    return "MALI_ERR";
-}
-
-
-static int mtk_mali_trace_handler(void *pData)
-{
-    /* kthread_run() hands the worker a void pointer; recover the work item. */
-    struct mtk_mali_trace_work *pWork = (struct mtk_mali_trace_work *)pData;
-    struct task_struct  *pTask;
-    struct sched_param  param;
-    unsigned long       flags;
-    int                 coreID;
-    int                 eventID;
-    char                *pName;
-
-    pTask = current;
-
-    //sched_getparam(0, &param);
-    memset(&param, 0, sizeof(param));
-    param.sched_priority = 90; 
-    sched_setscheduler(pTask, SCHED_RR, &param);
-
-    coreID = pWork->coreID;
-
-    do
-    {
-        wait_event_interruptible(gTraceQueue[coreID], ((pWork->total > 0) || (1 == pWork->stop)));
-
-        if(kthread_should_stop() || (1 == pWork->stop))
-        {
-            break;
-        }
-
-        spin_lock_irqsave(&gThreadLock[coreID], flags);
-
-        smp_mb();
-
-        if (pWork->total <= 0)
-        {
-            spin_unlock_irqrestore(&gThreadLock[coreID], flags); 
-            continue;
-        }
-        
-        eventID = pWork->event[pWork->read].theID;
-        pName   = pWork->event[pWork->read].name;
-        pWork->read++;
-        if (pWork->read >= MTK_MALI_MAX_QUEUE_SIZE)
-        {
-            pWork->read = 0;
-        }
-
-        pWork->total--;
-        spin_unlock_irqrestore(&gThreadLock[coreID], flags); 
-
-        switch(eventID)
-        {
-            case 0:  // GP0 begin
-                mali_kernel_trace_begin(pName);
-                break;
-            case 1:  // GP0 end
-                mali_kernel_trace_end();
-                break;
-            case 2:  // PP0 begin
-                mali_kernel_trace_begin(pName);
-                break;
-            case 3:  // PP0 end
-                mali_kernel_trace_end();
-                break;
-            case 4:  // PP1 begin
-                mali_kernel_trace_begin(pName);
-                break;
-            case 5:  // PP1 end
-                mali_kernel_trace_end();
-                break;
-            default:
-                //assert(0);
-                break;
-        }
-    } while(1);
-
-    return 0;
-}
-
-
-int mtk_mali_kernel_trace_init(void)
-{
-    int  index;
-    char *pName;
-
-    for (index = 0; index < MTK_MALI_MAX_CORE_COUNT; index++)
-    {
-        init_waitqueue_head(&gTraceQueue[index]);
-
-        memset(&gTraceEvent[index], 0x0, sizeof(mtk_mali_trace_work));
-
-        // Record the core ID
-        gTraceEvent[index].coreID = index;
-
-        spin_lock_init(&gThreadLock[index]);
-
-        gTraceThread[index] = kthread_run(mtk_mali_trace_handler,
-                                          &gTraceEvent[index],
-                                          "%s",
-                                          mali_core_id_to_core_name(index));      
-        if(IS_ERR(gTraceThread[index]))
-        {
-            printk(KERN_ERR "Unable to start kernel thread for core%d\n", index);
-            gTraceThread[index] = NULL; /* don't keep an error pointer around */
-        }
-    }
-
-    return 0;
-}
-
-
-void mtk_mali_kernel_trace_begin(MALI_CORE_ENUM     coreType,
-                                 int                index,
-                                 struct task_struct *pTask)
-{
-    unsigned long flags;
-    int  coreID;
-    int  eventID;
-    int  slotID;
-
-    coreID  = mali_core_index_to_core_id(coreType, index);
-    eventID = mali_core_index_to_event_id(coreType, index, 1);
-
-    if (eventID < MTK_MALI_MAX_EVENT_COUNT)
-    {
-        spin_lock_irqsave(&gThreadLock[coreID], flags);
-
-        smp_mb();
-
-        slotID = gTraceEvent[coreID].write;
-        gTraceEvent[coreID].write++;
-        if (gTraceEvent[coreID].write >= MTK_MALI_MAX_QUEUE_SIZE)
-        {
-            gTraceEvent[coreID].write = 0;
-        }
-       
-        gTraceEvent[coreID].total++;
-        spin_unlock_irqrestore(&gThreadLock[coreID], flags); 
-
-        gTraceEvent[coreID].event[slotID].theID = eventID;
-        memcpy(gTraceEvent[coreID].event[slotID].name, pTask->comm, sizeof(pTask->comm));
-
-        wake_up_interruptible(&gTraceQueue[coreID]);
-    }
-}
-
-
-void mtk_mali_kernel_trace_end(MALI_CORE_ENUM coreType,
-                               int            index)
-{
-    unsigned long flags;
-    int  coreID;
-    int  eventID;
-    int  slotID;
-
-    coreID  = mali_core_index_to_core_id(coreType, index);
-    eventID = mali_core_index_to_event_id(coreType, index, 0);
-
-    if (eventID < MTK_MALI_MAX_EVENT_COUNT)
-    {
-        spin_lock_irqsave(&gThreadLock[coreID], flags);
-
-        smp_mb();
-
-        slotID = gTraceEvent[coreID].write;
-        gTraceEvent[coreID].write++;
-        if (gTraceEvent[coreID].write >= MTK_MALI_MAX_QUEUE_SIZE)
-        {
-            gTraceEvent[coreID].write = 0;
-        }
-
-        gTraceEvent[coreID].total++;
-        spin_unlock_irqrestore(&gThreadLock[coreID], flags);
-
-        gTraceEvent[coreID].event[slotID].theID = eventID;
-
-        wake_up_interruptible(&gTraceQueue[coreID]);
-    }
-}
-
-
-int mtk_mali_kernel_trace_exit(void)
-{
-    int index;
-
-    for (index = 0; index < MTK_MALI_MAX_CORE_COUNT; index++)
-    {
-        gTraceEvent[index].stop = 1;
-        if (gTraceThread[index])
-        {
-            kthread_stop(gTraceThread[index]);
-        }
-    }
-    
-    return 0;
-}
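For reference, the begin/counter/end helpers above emit Android systrace-style marks: "B|tgid|name" opens a slice, "E" closes the most recent slice, and "C|tgid|name|value" records a counter value. A userspace-side illustration of the same marker format, written through ftrace's trace_marker file (this snippet is not part of the driver, and the debugfs mount point is an assumption):

	#include <stdio.h>
	#include <unistd.h>

	/* Emit one named slice around a piece of work, systrace-style. */
	static void trace_slice(FILE *marker, const char *name)
	{
		fprintf(marker, "B|%d|%s\n", (int)getpid(), name);
		fflush(marker);
		/* ... the work being measured ... */
		fprintf(marker, "E\n");
		fflush(marker);
	}

	int main(void)
	{
		/* Assumes debugfs is mounted at /sys/kernel/debug. */
		FILE *marker = fopen("/sys/kernel/debug/tracing/trace_marker", "w");
		if (!marker)
			return 1;
		trace_slice(marker, "mali_pp0_job");
		fclose(marker);
		return 0;
	}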
diff --git a/drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.h b/drivers/misc/mediatek/gpu/mt8127/mediatek/mtk_mali_trace.h
deleted file mode 100644 (file)
index e68bc1b..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __MTK_MALI_TRACE_H__
-#define __MTK_MALI_TRACE_H__
-
-#include <linux/sched.h>
-
-#define MTK_MALI_MAX_QUEUE_SIZE     32
-#define MTK_MALI_MAX_CORE_COUNT     3  // For 6582 MALI: 1 GP core + 2 PP cores
-#define MTK_MALI_MAX_EVENT_COUNT    6  // MALI_CORE_MAX_NUM * 2 (i.e. begin and end)
-#define MTK_MALI_MAX_NAME_SIZE      32
-
-typedef enum MALI_CORE_ENUM
-{
-    MALI_CORE_TYPE_GP,
-    MALI_CORE_TYPE_PP,
-} MALI_CORE_ENUM;
-
-
-typedef struct mtk_mali_trace_event
-{
-    int                  theID;
-    char                 name[MTK_MALI_MAX_NAME_SIZE];
-} mtk_mali_trace_event;
-
-typedef struct mtk_mali_trace_work
-{
-    mtk_mali_trace_event event[MTK_MALI_MAX_QUEUE_SIZE];
-    int                  read;
-    int                  write;
-    int                  total; 
-    int                  stop;   
-    int                  coreID;
-} mtk_mali_trace_work;
-
-
-#define MTK_TRACE_CONTAINER_OF(ptr, type, member)       \
-    ((type *)( ((char *)ptr) - offsetof(type, member)))
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int mtk_mali_kernel_trace_init(void);
-
-void mtk_mali_kernel_trace_begin(MALI_CORE_ENUM     coreType,
-                                 int                index,
-                                 struct task_struct *pTask);
-
-void mtk_mali_kernel_trace_end(MALI_CORE_ENUM coreType,
-                               int            index);
-
-int mtk_mali_kernel_trace_exit(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // __MTK_MALI_TRACE_H__
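Side note: MTK_TRACE_CONTAINER_OF above is a local re-spelling of the kernel's standard container_of() macro. A minimal, self-contained illustration of the pointer arithmetic it performs (the types here are hypothetical, chosen only for the example):

	#include <stddef.h>

	struct inner { int id; };
	struct outer { int tag; struct inner member; };

	/* Same arithmetic as the macro: step back from the member's address
	 * by its offset within the enclosing type. */
	#define CONTAINER_OF(ptr, type, field) \
		((type *)((char *)(ptr) - offsetof(type, field)))

	static struct outer *outer_from_inner(struct inner *p)
	{
		return CONTAINER_OF(p, struct outer, member);
	}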
index 13e5d6ab8f02e7144c120d5546adc2007b1298c7..db4bba2279ea515e6bf3dac4e71922e3322d2b25 100644 (file)
@@ -1 +1,2 @@
+
 unsigned long