OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
USING_GPU_UTILIZATION ?= 1
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
############## Kasin Added, for platform. ################
ifeq ($(CONFIG_MALI400_DEBUG),y)
BUILD ?= debug
else
BUILD ?= release
ldflags-y += --strip-debug
endif
+ifeq ($(CONFIG_MALI_DVFS),y)
+ USING_GPU_UTILIZATION = 1
+endif
##################### end Kasin Added. ###################
############## Kasin Added, useless now. ################
common/mali_gp_job.o \
common/mali_soft_job.o \
common/mali_scheduler.o \
- common/mali_gp_scheduler.o \
- common/mali_pp_scheduler.o \
+ common/mali_executor.o \
common/mali_group.o \
common/mali_dlbu.o \
common/mali_broadcast.o \
common/mali_pmu.o \
common/mali_user_settings_db.o \
common/mali_kernel_utilization.o \
+ common/mali_control_timer.o \
common/mali_l2_cache.o \
- common/mali_dma.o \
common/mali_timeline.o \
common/mali_timeline_fence_wait.o \
common/mali_timeline_sync_fence.o \
mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
-mali-$(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) += common/mali_power_performance_policy.o
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
# Tell the Linux build system from which .o file to create the kernel module
obj-$(CONFIG_MALI400) := mali.o
VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
-VERSION_STRINGS += USING_POWER_PERFORMANCE_POLICY=$(CONFIG_POWER_PERFORMANCE_POLICY)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
# Create file with Mali driver configuration
select HIGH_RES_TIMERS
select HW_PERF_EVENTS
select CPU_FREQ
+ select MALI400_DEBUG
config MALI400_PROFILING
bool "Enable Mali profiling"
depends on MALI400
- select MALI400_DEBUG
select TRACEPOINTS
default n
---help---
---help---
This enables support for the UMP memory sharing API in the Mali driver.
-config MALI400_POWER_PERFORMANCE_POLICY
- bool "Enable Mali power performance policy"
- depends on ARM
+config MALI_DVFS
+ bool "Enable Mali dynamically frequency change"
+ depends on MALI400
default n
---help---
- This enables support for dynamic performance scaling of Mali with the goal of lowering power consumption.
+ This enables support for dynamically changing the frequency of the Mali GPU with the goal of lowering power consumption.
config MALI_DMA_BUF_MAP_ON_ATTACH
bool "Map dma-buf attachments on attach"
domains at the same time may cause peak currents higher than what some systems can handle.
These systems must not enable this option.
+config MALI_DT
+ bool "Using device tree to initialize module"
+ depends on MALI400 && OF
+ default n
+ ---help---
+ This enables the Mali driver to use the device tree to get platform resources
+ and disables the old config method. The Mali driver can then run on platforms
+ where the device tree is enabled in the kernel and the corresponding hardware
+ description is implemented properly in the device's DTS file.
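+
+ A minimal, purely illustrative node might look like the following
+ (the "arm,mali-utgard" compatible string, address, size and IRQ list
+ are assumptions that must match your platform and kernel bindings):
+
+ mali: gpu@13000000 {
+ compatible = "arm,mali-utgard";
+ reg = <0x13000000 0x10000>;
+ interrupts = <...>; /* per-core GP/PP/MMU IRQs */
+ };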
+
config MALI_QUIET
bool "Make Mali driver very quiet"
depends on MALI400 && !MALI400_DEBUG
USE_UMPV2=0
USING_PROFILING ?= 1
USING_INTERNAL_PROFILING ?= 0
-USING_POWER_PERFORMANCE_POLICY ?= 0
+USING_DVFS ?= 0
MALI_HEATMAPS_ENABLED ?= 0
MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
# The Makefile sets up "arch" based on the CONFIG, creates the version info
# string and the __malidrv_build_info.c file, and then call the Linux build
$(error No KDIR found for platform $(TARGET_PLATFORM))
endif
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
+ $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose the one you need)
+ endif
+endif
ifeq ($(USING_UMP),1)
export CONFIG_MALI400_UMP=y
export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
endif
-ifeq ($(USING_POWER_PERFORMANCE_POLICY),1)
-export CONFIG_MALI400_POWER_PERFORMANCE_POLICY=y
-export EXTRA_DEFINES += -DCONFIG_MALI400_POWER_PERFORMANCE_POLICY
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
endif
ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
endif
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
ifneq ($(BUILD),release)
# Debug
export CONFIG_MALI400_DEBUG=y
+++ /dev/null
-/// ***************************************************************************************
-/// - MALI
-//$$ MODULE="MALI"
-//$$ DEVICE="mali"
-//$$ L2 PROP_STR = "status"
- mali{
- compatible = "arm,mali";
- dev_name = "mali";
- status = "ok";
-
-//$$ L2 PROP_U32 = "dvfs_id"
-//$$ L2 PROP_U32 = "recorde_number"
-//$$ L2 PROP_U32 = "dvfs_table"
- cfg {
- shared_memory = <1024>; /** Mbyte **/
- dvfs_size = <5>; /** must be correct count for dvfs_table */
- dvfs_table = <
- /* NOTE: frequent in this table must be ascending order */
- /* freq_idx volage_index min max */
- 0 0 0 200
- 1 1 152 205
- 2 2 180 212
- 3 3 205 236
- 4 4 230 256
- >;
- };
-};
#include "mali_kernel_common.h"
#include "mali_osk.h"
-static const int bcast_unit_reg_size = 0x1000;
-static const int bcast_unit_addr_broadcast_mask = 0x0;
-static const int bcast_unit_addr_irq_override_mask = 0x4;
+#define MALI_BROADCAST_REGISTER_SIZE 0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4
struct mali_bcast_unit {
struct mali_hw_core hw_core;
struct mali_bcast_unit *bcast_unit = NULL;
MALI_DEBUG_ASSERT_POINTER(resource);
- MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+ resource->description));
bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
if (NULL == bcast_unit) {
- MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+ MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
return NULL;
}
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+ resource, MALI_BROADCAST_REGISTER_SIZE)) {
bcast_unit->current_mask = 0;
mali_bcast_reset(bcast_unit);
return bcast_unit;
} else {
- MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+ MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n"));
}
_mali_osk_free(bcast_unit);
void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
-
mali_hw_core_delete(&bcast_unit->hw_core);
_mali_osk_free(bcast_unit);
}
* Note: calling this function repeatedly with the same @group
* has the same effect as calling it once
*/
-void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
{
u32 bcast_id;
u32 broadcast_mask;
* Note: calling this function repeatedly with the same @group
* has the same effect as calling it once
*/
-void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
{
u32 bcast_id;
u32 broadcast_mask;
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_PRINT(4,
+ ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+ bcast_unit->current_mask,
+ bcast_unit->current_mask & 0xFF));
+
/* set broadcast mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_broadcast_mask,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
bcast_unit->current_mask);
/* set IRQ override mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_irq_override_mask,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
bcast_unit->current_mask & 0xFF);
}
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
/* set broadcast mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_broadcast_mask,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
0x0);
/* set IRQ override mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_irq_override_mask,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
0x0);
}
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
/*
* Interface for the broadcast unit on Mali-450.
*
{
mali_bcast_reset(bcast_unit);
}
+
+#endif /* __MALI_BROADCAST_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+static u64 period_start_time = 0;
+
+static _mali_osk_timer_t *mali_control_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
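+/* Control/sampling period in ms; mali_control_timer_init() overrides it
+ * with the device data field control_interval when one is provided. */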
+static u32 mali_control_timeout = 1000;
+
+void mali_control_timer_add(u32 timeout)
+{
+ _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+static void mali_control_timer_callback(void *arg)
+{
+ if (mali_utilization_enabled()) {
+ struct mali_gpu_utilization_data *util_data = NULL;
+ u64 time_period = 0;
+
+ /* Calculate gpu utilization */
+ util_data = mali_utilization_calculate(&period_start_time, &time_period);
+
+ if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+ mali_dvfs_policy_realize(util_data, time_period);
+#else
+ mali_utilization_platform_realize(util_data);
+#endif
+ }
+
+ if (MALI_TRUE == timer_running) {
+ mali_control_timer_add(mali_control_timeout);
+ }
+ }
+}
+
+/* Init a timer (for now it is used for GPU utilization and dvfs) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.control_interval) {
+ mali_control_timeout = data.control_interval;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+ }
+ }
+
+ mali_control_timer = _mali_osk_timer_init();
+ if (NULL == mali_control_timer) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_control_timer_term(void)
+{
+ if (NULL != mali_control_timer) {
+ _mali_osk_timer_del(mali_control_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(mali_control_timer);
+ mali_control_timer = NULL;
+ }
+}
+
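+/*
+ * Mark the control timer as running and start a new utilization period
+ * at @time_now. Returns MALI_TRUE if a new period was started,
+ * MALI_FALSE if the timer was already running. The timer itself is
+ * armed separately through mali_control_timer_add().
+ */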
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+
+ period_start_time = time_now;
+
+ mali_utilization_reset();
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
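+/*
+ * Stop the timer from re-arming itself. The utilization data lock is
+ * taken so that clearing timer_running cannot race with the timer
+ * callback; the pending timer is deleted (and utilization reset) only
+ * when @suspend is MALI_TRUE.
+ */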
+void mali_control_timer_suspend(mali_bool suspend)
+{
+ mali_utilization_data_lock();
+
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+
+ mali_utilization_data_unlock();
+
+ if (suspend == MALI_TRUE) {
+ _mali_osk_timer_del(mali_control_timer);
+ mali_utilization_reset();
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
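+/*
+ * Sketch of the intended lifecycle, based on mali_control_timer.c:
+ *
+ *   mali_control_timer_init();             -- at driver init
+ *   mali_control_timer_resume(time_now);   -- a new period starts
+ *   mali_control_timer_add(timeout_ms);    -- arm the first timeout
+ *   (the callback samples utilization and re-arms itself each period)
+ *   mali_control_timer_suspend(MALI_TRUE); -- stop and delete the timer
+ *   mali_control_timer_term();             -- at driver exit
+ */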
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+void mali_control_timer_term(void);
+
+mali_bool mali_control_timer_resume(u64 time_now);
+
+void mali_control_timer_suspend(mali_bool suspend);
+
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
_mali_osk_errcode_t mali_dlbu_initialize(void)
{
-
MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
- if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
- MALI_SUCCESS;
+ if (_MALI_OSK_ERR_OK ==
+ mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+ &mali_dlbu_cpu_addr)) {
+ return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_FAULT;
{
MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
- mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+ if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+ mali_mmu_release_table_page(mali_dlbu_phys_addr,
+ mali_dlbu_cpu_addr);
+ mali_dlbu_phys_addr = 0;
+ mali_dlbu_cpu_addr = 0;
+ }
}
struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
{
MALI_DEBUG_ASSERT_POINTER(dlbu);
-
- mali_dlbu_reset(dlbu);
mali_hw_core_delete(&dlbu->hw_core);
_mali_osk_free(dlbu);
}
+++ /dev/null
-/*
- * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "mali_kernel_common.h"
-#include "mali_osk.h"
-#include "mali_hw_core.h"
-#include "mali_dma.h"
-
-/**
- * Size of the Mali-450 DMA unit registers in bytes.
- */
-#define MALI450_DMA_REG_SIZE 0x08
-
-/**
- * Value that appears in MEMSIZE if an error occurs when reading the command list.
- */
-#define MALI450_DMA_BUS_ERR_VAL 0xffffffff
-
-/**
- * Mali DMA registers
- * Used in the register read/write routines.
- * See the hardware documentation for more information about each register.
- */
-typedef enum mali_dma_register {
-
- MALI450_DMA_REG_SOURCE_ADDRESS = 0x0000,
- MALI450_DMA_REG_SOURCE_SIZE = 0x0004,
-} mali_dma_register;
-
-struct mali_dma_core {
- struct mali_hw_core hw_core; /**< Common for all HW cores */
- _mali_osk_spinlock_t *lock; /**< Lock protecting access to DMA core */
- mali_dma_pool pool; /**< Memory pool for command buffers */
-};
-
-static struct mali_dma_core *mali_global_dma_core = NULL;
-
-struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource)
-{
- struct mali_dma_core *dma;
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT(NULL == mali_global_dma_core);
-
- dma = _mali_osk_malloc(sizeof(struct mali_dma_core));
- if (dma == NULL) goto alloc_failed;
-
- dma->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_DMA_COMMAND);
- if (NULL == dma->lock) goto lock_init_failed;
-
- dma->pool = mali_dma_pool_create(MALI_DMA_CMD_BUF_SIZE, 4, 0);
- if (NULL == dma->pool) goto dma_pool_failed;
-
- err = mali_hw_core_create(&dma->hw_core, resource, MALI450_DMA_REG_SIZE);
- if (_MALI_OSK_ERR_OK != err) goto hw_core_failed;
-
- mali_global_dma_core = dma;
- MALI_DEBUG_PRINT(2, ("Mali DMA: Created Mali APB DMA unit\n"));
- return dma;
-
- /* Error handling */
-
-hw_core_failed:
- mali_dma_pool_destroy(dma->pool);
-dma_pool_failed:
- _mali_osk_spinlock_term(dma->lock);
-lock_init_failed:
- _mali_osk_free(dma);
-alloc_failed:
- MALI_DEBUG_PRINT(2, ("Mali DMA: Failed to create APB DMA unit\n"));
- return NULL;
-}
-
-void mali_dma_delete(struct mali_dma_core *dma)
-{
- MALI_DEBUG_ASSERT_POINTER(dma);
-
- MALI_DEBUG_PRINT(2, ("Mali DMA: Deleted Mali APB DMA unit\n"));
-
- mali_hw_core_delete(&dma->hw_core);
- _mali_osk_spinlock_term(dma->lock);
- mali_dma_pool_destroy(dma->pool);
- _mali_osk_free(dma);
-}
-
-static void mali_dma_bus_error(struct mali_dma_core *dma)
-{
- u32 addr = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS);
-
- MALI_PRINT_ERROR(("Mali DMA: Bus error when reading command list from 0x%lx\n", addr));
- MALI_IGNORE(addr);
-
- /* Clear the bus error */
- mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, 0);
-}
-
-static mali_bool mali_dma_is_busy(struct mali_dma_core *dma)
-{
- u32 val;
- mali_bool dma_busy_flag = MALI_FALSE;
-
- MALI_DEBUG_ASSERT_POINTER(dma);
-
- val = mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE);
-
- if (MALI450_DMA_BUS_ERR_VAL == val) {
- /* Bus error reading command list */
- mali_dma_bus_error(dma);
- return MALI_FALSE;
- }
- if (val > 0) {
- dma_busy_flag = MALI_TRUE;
- }
-
- return dma_busy_flag;
-}
-
-static void mali_dma_start_transfer(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
-{
- u32 memsize = buf->size * 4;
- u32 addr = buf->phys_addr;
-
- MALI_DEBUG_ASSERT_POINTER(dma);
- MALI_DEBUG_ASSERT(memsize < (1 << 16));
- MALI_DEBUG_ASSERT(0 == (memsize & 0x3)); /* 4 byte aligned */
-
- MALI_DEBUG_ASSERT(!mali_dma_is_busy(dma));
-
- /* Writes the physical source memory address of chunk containing command headers and data */
- mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS, addr);
-
- /* Writes the length of transfer */
- mali_hw_core_register_write(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE, memsize);
-}
-
-_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf)
-{
- MALI_DEBUG_ASSERT_POINTER(buf);
-
- buf->virt_addr = (u32 *)mali_dma_pool_alloc(mali_global_dma_core->pool, &buf->phys_addr);
- if (NULL == buf->virt_addr) {
- return _MALI_OSK_ERR_NOMEM;
- }
-
- /* size contains the number of words in the buffer and is incremented
- * as commands are added to the buffer. */
- buf->size = 0;
-
- return _MALI_OSK_ERR_OK;
-}
-
-void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf)
-{
- MALI_DEBUG_ASSERT_POINTER(buf);
-
- if (NULL == buf->virt_addr) return;
-
- mali_dma_pool_free(mali_global_dma_core->pool, buf->virt_addr, buf->phys_addr);
-
- buf->virt_addr = NULL;
-}
-
-_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf)
-{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
-
- _mali_osk_spinlock_lock(dma->lock);
-
- if (mali_dma_is_busy(dma)) {
- err = _MALI_OSK_ERR_BUSY;
- goto out;
- }
-
- mali_dma_start_transfer(dma, buf);
-
-out:
- _mali_osk_spinlock_unlock(dma->lock);
- return err;
-}
-
-void mali_dma_debug(struct mali_dma_core *dma)
-{
- MALI_DEBUG_ASSERT_POINTER(dma);
- MALI_DEBUG_PRINT(1, ("DMA unit registers:\n\t%08x, %08x\n",
- mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_ADDRESS),
- mali_hw_core_register_read(&dma->hw_core, MALI450_DMA_REG_SOURCE_SIZE)
- ));
-
-}
-
-struct mali_dma_core *mali_dma_get_global_dma_core(void)
-{
- /* Returns the global dma core object */
- return mali_global_dma_core;
-}
+++ /dev/null
-/*
- * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __MALI_DMA_H__
-#define __MALI_DMA_H__
-
-#include "mali_osk.h"
-#include "mali_osk_mali.h"
-#include "mali_hw_core.h"
-
-#define MALI_DMA_CMD_BUF_SIZE 1024
-
-typedef struct mali_dma_cmd_buf {
- u32 *virt_addr; /**< CPU address of command buffer */
- mali_dma_addr phys_addr; /**< Physical address of command buffer */
- u32 size; /**< Number of prepared words in command buffer */
-} mali_dma_cmd_buf;
-
-/** @brief Create a new DMA unit
- *
- * This is called from entry point of the driver in order to create and
- * intialize the DMA resource
- *
- * @param resource it will be a pointer to a DMA resource
- * @return DMA object on success, NULL on failure
- */
-struct mali_dma_core *mali_dma_create(_mali_osk_resource_t *resource);
-
-/** @brief Delete DMA unit
- *
- * This is called on entry point of driver if the driver initialization fails
- * after initialization of the DMA unit. It is also called on the exit of the
- * driver to delete the DMA resource
- *
- * @param dma Pointer to DMA unit object
- */
-void mali_dma_delete(struct mali_dma_core *dma);
-
-/** @brief Retrieves the MALI DMA core object (if there is)
- *
- * @return The Mali DMA object otherwise NULL
- */
-struct mali_dma_core *mali_dma_get_global_dma_core(void);
-
-/**
- * @brief Run a command buffer on the DMA unit
- *
- * @param dma Pointer to the DMA unit to use
- * @param buf Pointer to the command buffer to use
- * @return _MALI_OSK_ERR_OK if the buffer was started successfully,
- * _MALI_OSK_ERR_BUSY if the DMA unit is busy.
- */
-_mali_osk_errcode_t mali_dma_start(struct mali_dma_core *dma, mali_dma_cmd_buf *buf);
-
-/**
- * @brief Create a DMA command
- *
- * @param core Mali core
- * @param reg offset to register of core
- * @param n number of registers to write
- */
-MALI_STATIC_INLINE u32 mali_dma_command_write(struct mali_hw_core *core, u32 reg, u32 n)
-{
- u32 core_offset = core->phys_offset;
-
- MALI_DEBUG_ASSERT(reg < 0x2000);
- MALI_DEBUG_ASSERT(n < 0x800);
- MALI_DEBUG_ASSERT(core_offset < 0x30000);
- MALI_DEBUG_ASSERT(0 == ((core_offset + reg) & ~0x7FFFF));
-
- return (n << 20) | (core_offset + reg);
-}
-
-/**
- * @brief Add a array write to DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param count Number of 4 byte words to write
- */
-MALI_STATIC_INLINE void mali_dma_write_array(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
- u32 reg, u32 *data, u32 count)
-{
- MALI_DEBUG_ASSERT((buf->size + 1 + count) < MALI_DMA_CMD_BUF_SIZE / 4);
-
- buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, count);
-
- _mali_osk_memcpy(buf->virt_addr + buf->size, data, count * sizeof(*buf->virt_addr));
-
- buf->size += count;
-}
-
-/**
- * @brief Add a conditional array write to DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param count Number of 4 byte words to write
- * @param ref Pointer to referance data that can be skipped if equal
- */
-MALI_STATIC_INLINE void mali_dma_write_array_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
- u32 reg, u32 *data, u32 count, const u32 *ref)
-{
- /* Do conditional array writes are not yet implemented, fallback to a
- * normal array write. */
- mali_dma_write_array(buf, core, reg, data, count);
-}
-
-/**
- * @brief Add a conditional register write to the DMA command buffer
- *
- * If the data matches the reference the command will be skipped.
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- * @param ref Pointer to referance data that can be skipped if equal
- */
-MALI_STATIC_INLINE void mali_dma_write_conditional(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
- u32 reg, u32 data, const u32 ref)
-{
- /* Skip write if reference value is equal to data. */
- if (data == ref) return;
-
- buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
-
- buf->virt_addr[buf->size++] = data;
-
- MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
-}
-
-/**
- * @brief Add a register write to the DMA command buffer
- *
- * @param buf DMA command buffer to fill in
- * @param core Core to do DMA to
- * @param reg Register on core to start writing to
- * @param data Pointer to data to write
- */
-MALI_STATIC_INLINE void mali_dma_write(mali_dma_cmd_buf *buf, struct mali_hw_core *core,
- u32 reg, u32 data)
-{
- buf->virt_addr[buf->size++] = mali_dma_command_write(core, reg, 1);
-
- buf->virt_addr[buf->size++] = data;
-
- MALI_DEBUG_ASSERT(buf->size < MALI_DMA_CMD_BUF_SIZE / 4);
-}
-
-/**
- * @brief Prepare DMA command buffer for use
- *
- * This function allocates the DMA buffer itself.
- *
- * @param buf The mali_dma_cmd_buf to prepare
- * @return _MALI_OSK_ERR_OK if the \a buf is ready to use
- */
-_mali_osk_errcode_t mali_dma_get_cmd_buf(mali_dma_cmd_buf *buf);
-
-/**
- * @brief Check if a DMA command buffer is ready for use
- *
- * @param buf The mali_dma_cmd_buf to check
- * @return MALI_TRUE if buffer is usable, MALI_FALSE otherwise
- */
-MALI_STATIC_INLINE mali_bool mali_dma_cmd_buf_is_valid(mali_dma_cmd_buf *buf)
-{
- return NULL != buf->virt_addr;
-}
-
-/**
- * @brief Return a DMA command buffer
- *
- * @param buf Pointer to DMA command buffer to return
- */
-void mali_dma_put_cmd_buf(mali_dma_cmd_buf *buf);
-
-#endif /* __MALI_DMA_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
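+/* Maps a percentage onto the 0..256 utilization scale,
+ * e.g. MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90) = (int)(90 * 256 / 100.0 + 0.5) = 230 */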
+
+/** The max fps, the same as the display vsync, default 60; can be set by a module parameter at insert time */
+int mali_max_system_fps = 60;
+/** A lower limit on the desired FPS, default 58; can be set by a module parameter at insert time */
+int mali_desired_fps = 58;
+
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
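+/* Indices into gpu_clk->item[]; -1 means not yet known */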
+static int clock_step = -1;
+static int cur_clk_step = -1;
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Platform-provided frequency control callbacks */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
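+/*
+ * Estimate the window-render FPS for the period:
+ * fps = (windows rendered * 1e9) / time_period (ns),
+ * computed with a 32-bit division by first shifting both operands down
+ * until the larger one fits in 32 bits.
+ */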
+static u32 calculate_window_render_fps(u64 time_period)
+{
+ u32 max_window_number;
+ u64 tmp;
+ u64 max = time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 time_period_shift;
+ u32 max_window_number_shift;
+ u32 ret_val;
+
+ max_window_number = mali_session_max_window_num();
+
+ /* To avoid float division, extend the dividend to ns unit */
+ tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+ if (tmp > time_period) {
+ max = tmp;
+ }
+
+ /*
+ * We may have 64-bit values, a dividend or a divisor or both
+ * To avoid dependencies on a 64-bit divider, we shift down the two values
+ * equally first.
+ */
+ leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+ shift_val = 32 - leading_zeroes;
+
+ time_period_shift = (u32)(time_period >> shift_val);
+ max_window_number_shift = (u32)(tmp >> shift_val);
+
+ ret_val = max_window_number_shift / time_period_shift;
+
+ return ret_val;
+}
+
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+ int i = 0;
+ bool clock_changed = false;
+
+ /* Round up to the closest available frequency step for target_clock_mhz */
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+ /* Find the first item > target_clock_mhz */
+ if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+ break;
+ }
+ }
+
+ /* If the target clock is greater than the maximum clock, just pick the maximum one */
+ if (i == gpu_clk->num_of_steps) {
+ i = gpu_clk->num_of_steps - 1;
+ } else {
+ if ((!pick_clock_up) && (i > 0)) {
+ i = i - 1;
+ }
+ }
+
+ clock_step = i;
+ if (cur_clk_step != clock_step) {
+ clock_changed = true;
+ }
+
+ return clock_changed;
+}
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+ int under_perform_boundary_value = 0;
+ int over_perform_boundary_value = 0;
+ int current_fps = 0;
+ int current_gpu_util = 0;
+ bool clock_changed = false;
+ u32 window_render_fps;
+#if CLOCK_TUNING_TIME_DEBUG
+ struct timeval start;
+ struct timeval stop;
+ unsigned int elapse_time;
+ do_gettimeofday(&start);
+#endif
+
+ if (NULL == gpu_clk) {
+ MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. \n"));
+ return;
+ }
+
+ window_render_fps = calculate_window_render_fps(time_period);
+
+ current_fps = window_render_fps;
+ current_gpu_util = data->utilization_gpu;
+
+ /* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+ if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+ } else {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ }
+
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps));
+
+ /* Get current clock value */
+ cur_clk_step = mali_gpu_get_freq();
+
+ /* 1. Consider the offscreen case */
+ if (0 == current_fps) {
+ /* GP or PP under perform, need to give full power */
+ if (current_gpu_util > over_perform_boundary_value) {
+ if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+ clock_changed = true;
+ clock_step = gpu_clk->num_of_steps - 1;
+ }
+ }
+
+ /* If GPU is idle, use lowest power */
+ if (0 == current_gpu_util) {
+ if (cur_clk_step != 0) {
+ clock_changed = true;
+ clock_step = 0;
+ }
+ }
+
+ goto real_setting;
+ }
+
+ /* 2. Calculate target clock if the GPU clock can be tuned */
+ if (-1 != cur_clk_step) {
+ int target_clk_mhz = -1;
+ mali_bool pick_clock_up = MALI_TRUE;
+
+ if (current_gpu_util > under_perform_boundary_value) {
+ /* when under-performing, also factor in the fps deficit */
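+ /* target = current clock * (util / boundary) * (desired fps / current fps) */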
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+ pick_clock_up = MALI_TRUE;
+ } else if (current_gpu_util < over_perform_boundary_value) {
+ /* when over-performing there is no need to consider fps; the system is not trying to reach the desired fps */
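+ /* target = current clock * (util / boundary); the under-perform
+ * boundary is used here too, steering the clock back inside the window */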
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+ pick_clock_up = MALI_FALSE;
+ }
+
+ if (-1 != target_clk_mhz) {
+ clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+ }
+ }
+
+real_setting:
+ if (clock_changed) {
+ mali_gpu_set_freq(clock_step);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ gpu_clk->item[clock_step].clock,
+ gpu_clk->item[clock_step].vol / 1000,
+ 0, 0, 0);
+ }
+
+#if CLOCK_TUNING_TIME_DEBUG
+ do_gettimeofday(&stop);
+
+ elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+ MALI_DEBUG_PRINT(2, ("Using ARM power policy: eclapse time = %d\n", elapse_time));
+#endif
+}
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+ _mali_osk_device_data data;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n"));
+
+ mali_fps_step1 = mali_max_system_fps / 3;
+ mali_fps_step2 = mali_max_system_fps / 5;
+
+ data.get_clock_info(&gpu_clk);
+
+ if (gpu_clk != NULL) {
+#ifdef DEBUG
+ int i;
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+ MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+ i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+ }
+#endif
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n"));
+ }
+
+ mali_gpu_get_freq = data.get_freq;
+ mali_gpu_set_freq = data.set_freq;
+
+ if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+ && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+ mali_dvfs_enabled = MALI_TRUE;
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+ }
+ } else {
+ err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n"));
+ }
+
+ return err;
+}
+
+/*
+ * Always give full power when starting a new period,
+ * if Mali DVFS is enabled, for performance reasons.
+ */
+void mali_dvfs_policy_new_period(void)
+{
+ /* Always give full power when starting a new period */
+ unsigned int cur_clk_step = 0;
+
+ cur_clk_step = mali_gpu_get_freq();
+
+ if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+ mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+ gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+ }
+}
+
+mali_bool mali_dvfs_policy_enabled(void)
+{
+ return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+ if (mali_platform_device != NULL) {
+
+ struct mali_gpu_device_data *device_data = NULL;
+ device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+ if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+ int cur_clk_step = device_data->get_freq();
+ struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+ device_data->get_clock_info(&mali_gpu_clk);
+ clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+ clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+ }
+ }
+}
+#endif
+
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/* __MALI_DVFS_POLICY_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job
+ * queuing when in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+ EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+ EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */
+ EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */
+ EXEC_STATE_INACTIVE, /* Can be used, but must be activated first */
+ EXEC_STATE_IDLE, /* Active and ready to be used */
+ EXEC_STATE_WORKING, /* Executing a job */
+};
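+
+/*
+ * Physical PP groups move between the four group_list_* lists declared
+ * below according to these states; the virtual group and the GP group
+ * track their state with a single state variable instead.
+ */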
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but must be activated first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* PP cores that haven't been enabled yet because some PP cores haven't been disabled. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to notify userspace of PP core changes when core scaling
+ * finishes in mali_executor_complete_group(). */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static void mali_executor_lock(void);
+static void mali_executor_unlock(void);
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+ mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+ if (NULL == mali_executor_lock_obj) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+ if (NULL == executor_wq_high_pri) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_working_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+ if (NULL == executor_wq_notify_core_change) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_notify_core_change_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_terminate(void)
+{
+ if (NULL != executor_notify_core_change_wait_queue) {
+ _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+ executor_notify_core_change_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_notify_core_change) {
+ _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+ executor_wq_notify_core_change = NULL;
+ }
+
+ if (NULL != executor_working_wait_queue) {
+ _mali_osk_wait_queue_term(executor_working_wait_queue);
+ executor_working_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_high_pri) {
+ _mali_osk_wq_delete_work(executor_wq_high_pri);
+ executor_wq_high_pri = NULL;
+ }
+
+ if (NULL != mali_executor_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+ mali_executor_lock_obj = NULL;
+ }
+}
+
+void mali_executor_populate(void)
+{
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group)) {
+ virtual_group = group;
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ break;
+ }
+ }
+
+ /* Find all the available physical GP and PP cores */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+ if (!mali_group_is_virtual(group)) {
+ if (NULL != pp_core) {
+ if (0 == pp_version) {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (NULL != virtual_group) {
+ mali_executor_lock();
+ mali_group_add_group(virtual_group, group);
+ mali_executor_unlock();
+ } else {
+ _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+ group_list_inactive_count++;
+ }
+
+ num_physical_pp_cores_total++;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+
+ gp_group = group;
+ gp_group_state = EXEC_STATE_INACTIVE;
+ }
+
+ }
+ }
+ }
+
+ num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
+
+void mali_executor_depopulate(void)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+ if (NULL != gp_group) {
+ mali_group_delete(gp_group);
+ gp_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ if (NULL != virtual_group) {
+ mali_group_delete(virtual_group);
+ virtual_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+}
+
+void mali_executor_suspend(void)
+{
+ mali_executor_lock();
+
+ /* Increment the pause_count so that no more jobs will be scheduled */
+ pause_count++;
+
+ mali_executor_unlock();
+
+ _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+ mali_executor_is_suspended, NULL);
+
+ /*
+ * mali_executor_complete_XX() leaves jobs in idle state.
+ * deactivate option is used when we are going to power down
+ * the entire GPU (OS suspend) and want a consistent SW vs HW
+ * state.
+ */
+ mali_executor_lock();
+
+ mali_executor_deactivate_list_idle(MALI_TRUE);
+
+ /*
+ * The following steps are used to deactivate all activated
+ * (MALI_GROUP_STATE_ACTIVE) and activating
+ * (MALI_GROUP_STATE_ACTIVATION_PENDING) groups, to make sure
+ * the variable pd_mask_wanted ends up equal to 0. */
+ if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(gp_group);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (MALI_GROUP_STATE_INACTIVE
+ != mali_group_get_state(virtual_group)) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(virtual_group);
+ }
+ }
+
+ if (0 < group_list_inactive_count) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_inactive,
+ struct mali_group, executor_list) {
+ if (MALI_GROUP_STATE_ACTIVATION_PENDING
+ == mali_group_get_state(group)) {
+ mali_group_deactivate(group);
+ }
+
+ /*
+ * On Mali-450 platforms we may have a physical group in the inactive
+ * list whose state is MALI_GROUP_STATE_ACTIVATION_PENDING; deactivating
+ * it alone is not enough, we also need to add it back to the virtual
+ * group. At this point the virtual group must be in the INACTIVE
+ * state, so it is safe to add the physical group back to it.
+ */
+ if (NULL != virtual_group) {
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_inactive_count--;
+
+ mali_group_add_group(virtual_group, group);
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+ mali_executor_lock();
+
+ /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ pause_count--;
+ if (0 == pause_count) {
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+u32 mali_executor_get_num_cores_total(void)
+{
+ return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+ return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+ return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+ return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+ mali_bool ret;
+
+ mali_executor_lock();
+
+ /*
+ * This function is a bit complicated because
+ * mali_group_zap_session() can fail. This only happens because the
+ * group is in an unhandled page fault status.
+ * We need to make sure this page fault is handled before we return,
+ * so that we know every single outstanding MMU transactions have
+ * completed. This will allow caller to safely remove physical pages
+ * when we have returned.
+ */
+
+ MALI_DEBUG_ASSERT(NULL != gp_group);
+ ret = mali_group_zap_session(gp_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ ret = mali_group_zap_session(virtual_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE,
+ MALI_TRUE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+ struct mali_group, executor_list) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_TRUE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, free it */
+ mali_scheduler_complete_pp_job(pp_job,
+ 0, MALI_FALSE,
+ MALI_TRUE);
+ }
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ if (MALI_TRUE == deferred_schedule) {
+ _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+ } else {
+ /* Schedule from this thread*/
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+ }
+ }
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+ mali_gp_job_get_id(group->gp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_gp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ mali_group_mask_all_interrupts_gp(group);
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only VS completed so far, while PLBU is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only PLBU completed so far, while VS is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+ struct mali_gp_job *job = mali_group_get_running_gp_job(group);
+
+ /* PLBU out of mem */
+ MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Give group a chance to generate a SUSPEND event */
+ mali_group_oom(group);
+#endif
+
+ /*
+ * No need to keep the interrupt raised while
+ * waiting for more memory.
+ */
+ mali_executor_send_gp_oom_to_user(job);
+
+ mali_executor_unlock();
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_gp(group);
+ }
+ } else {
+ struct mali_gp_job *job;
+ mali_bool success;
+
+ success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success,
+ MALI_TRUE, &job, NULL);
+
+ mali_executor_unlock();
+
+ /* GP jobs always fully complete */
+ MALI_DEBUG_ASSERT(NULL != job);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, success,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (in_upper_half) {
+ if (mali_group_is_in_virtual(group)) {
+ /* Child groups should never handle PP interrupts */
+ MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+ MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+ mali_pp_job_get_id(group->pp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_pp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+ if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+ /* Some child groups are still working, so nothing to do right now */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+ if (!mali_group_has_timed_out(group)) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(group));
+ }
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_group_mask_all_interrupts_pp(group);
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_pp(group);
+ }
+ } else {
+ struct mali_pp_job *job = NULL;
+ mali_bool success;
+
+ success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success,
+ MALI_TRUE, NULL, &job);
+
+ mali_executor_unlock();
+
+ if (NULL != job) {
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+
+ MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ int_result = mali_group_get_interrupt_result_mmu(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ if (in_upper_half) {
+ /* Don't bother to do processing of errors in upper half */
+
+ struct mali_group *parent = group->parent_group;
+
+ mali_mmu_mask_all_interrupts(group->mmu);
+
+ mali_executor_unlock();
+
+ if (NULL == parent) {
+ mali_group_schedule_bottom_half_mmu(group);
+ } else {
+ mali_group_schedule_bottom_half_mmu(parent);
+ }
+
+ } else {
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+ u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+ u32 status = mali_mmu_get_status(group->mmu);
+		MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
+				     fault_address,
+				     (status >> 6) & 0x1F,
+				     (status & 32) ? "write" : "read",
+				     group->mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+ mali_mmu_get_rawstat(group->mmu), status));
+#endif
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_TRUE, &gp_job, &pp_job);
+
+ mali_executor_unlock();
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT(NULL == pp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ } else if (NULL != pp_job) {
+ MALI_DEBUG_ASSERT(NULL == gp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(pp_job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+ u32 i;
+ mali_bool child_groups_activated = MALI_FALSE;
+ mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+ u32 num_activated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_up(groups[i]);
+
+		if (MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+		    MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)) {
+ /* nothing more to do for this group */
+ continue;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+ mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+ num_activated++;
+#endif
+
+ if (mali_group_is_in_virtual(groups[i])) {
+ /*
+			 * At least one child group of the virtual group is powered on.
+ */
+ child_groups_activated = MALI_TRUE;
+ } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ /* Set gp and pp not in virtual to active. */
+ mali_group_set_active(groups[i]);
+ }
+
+ /* Move group from inactive to idle list */
+ if (groups[i] == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ gp_group_state);
+ gp_group_state = EXEC_STATE_IDLE;
+ } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+ && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ mali_executor_change_state_pp_physical(groups[i],
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ do_schedule = MALI_TRUE;
+ }
+
+ if (mali_executor_has_virtual_group() &&
+ MALI_TRUE == child_groups_activated &&
+ MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+		 * Try to activate the virtual group. This may not succeed
+		 * every time, since the child groups may not all be powered
+		 * on at once, leaving the virtual group in the activation
+		 * pending state.
+ */
+ if (mali_group_set_active(virtual_group)) {
+ /* Move group from inactive to idle */
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ virtual_group_state);
+ virtual_group_state = EXEC_STATE_IDLE;
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+
+ if (MALI_TRUE == do_schedule) {
+ /* Trigger a schedule */
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+ u32 num_groups)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ /* Groups must be either disabled or inactive */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_DISABLED) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_down(groups[i]);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+ mali_executor_unlock();
+}
+
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *tmp_group;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3,
+ ("Executor: Aborting all jobs from session 0x%08X.\n",
+ session));
+
+ mali_executor_lock();
+
+ if (mali_group_get_session(gp_group) == session) {
+ if (EXEC_STATE_WORKING == gp_group_state) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_FALSE, MALI_TRUE);
+ } else {
+ /* Same session, but not working, so just clear it */
+ mali_group_clear_session(gp_group);
+ }
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (EXEC_STATE_WORKING == virtual_group_state
+ && mali_group_get_session(virtual_group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE,
+ MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+ struct mali_group, executor_list) {
+ if (mali_group_get_session(group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+	/* Note: core scaling is enabled by default */
+ core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+ core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+ return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_enable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, disable it immediately.
+ * If the group is part of the virtual group and the virtual group is idle,
+ * disable the given physical group within it.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_disable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+ /* NB: This function is not optimized for time critical usage */
+
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+ ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+ mali_executor_unlock();
+
+ return ret;
+}
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == num_physical_pp_cores_enabled) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_physical_pp_cores_total) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ mali_executor_core_scale(target_core_nr);
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+ return 0;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ mali_executor_lock();
+
+ switch (gp_group_state) {
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in unknown/illegal state %u\n",
+ gp_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in WORKING state (count = %u):\n",
+ group_list_working_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in IDLE state (count = %u):\n",
+ group_list_idle_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in INACTIVE state (count = %u):\n",
+ group_list_inactive_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in DISABLED state (count = %u):\n",
+ group_list_disabled_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ switch (virtual_group_state) {
+ case EXEC_STATE_EMPTY:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state EMPTY\n");
+ break;
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in unknown/illegal state %u\n",
+ virtual_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(virtual_group, buf + n, size - n);
+ }
+
+ mali_executor_unlock();
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_total_cores = num_physical_pp_cores_total;
+ args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ _mali_osk_notification_t *new_notification = NULL;
+
+ new_notification = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL != new_notification) {
+ MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+ args->cookie, args->arguments[0], args->arguments[1]));
+
+ mali_executor_lock();
+
+ /* Resume the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /*
+ * Correct job is running, resume with new heap
+ */
+
+ mali_gp_job_set_oom_notification(job,
+ new_notification);
+
+ /* This will also re-enable interrupts */
+ mali_group_resume_gp_with_new_heap(gp_group,
+ args->cookie,
+ args->arguments[0],
+ args->arguments[1]);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n"));
+
+ _mali_osk_notification_delete(new_notification);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+ }
+
+ mali_executor_lock();
+
+ /* Abort the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /* Correct job is still running */
+ struct mali_gp_job *job_done = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &job_done, NULL);
+
+ /* The same job should have completed */
+ MALI_DEBUG_ASSERT(job_done == job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static void mali_executor_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+static void mali_executor_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+ _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
+static mali_bool mali_executor_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ ret = pause_count > 0 && !mali_executor_is_working();
+
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static mali_bool mali_executor_is_working(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return (0 != group_list_working_count ||
+ EXEC_STATE_WORKING == gp_group_state ||
+ EXEC_STATE_WORKING == virtual_group_state);
+}
+
+static void mali_executor_disable_empty_virtual(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+ if (mali_group_is_empty(virtual_group)) {
+ virtual_group_state = EXEC_STATE_EMPTY;
+ }
+}
+
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ /* Only rejoining after job has completed (still active) */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+ /* Make sure group and virtual group have same status */
+
+ if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Activation is pending for virtual group, leave
+ * this child group as active.
+ */
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(virtual_group));
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ }
+ }
+
+ /* Remove group from idle list */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+ EXEC_STATE_IDLE));
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_idle_count--;
+
+ /*
+ * And finally rejoin the virtual group
+ * group will start working on same job as virtual_group,
+ * if virtual_group is working on a job
+ */
+ mali_group_add_group(virtual_group, group);
+
+ return trigger_pm_update;
+}
+
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if defined(CONFIG_MALI450)
+ return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if defined(CONFIG_MALI450)
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return (EXEC_STATE_INACTIVE == virtual_group_state ||
+ EXEC_STATE_IDLE == virtual_group_state) ?
+ MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ job = mali_scheduler_job_pp_physical_peek();
+
+ if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+ if (0 < group_list_working_count &&
+ mali_pp_job_is_large_and_unstarted(job)) {
+ return MALI_TRUE;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */
+static void mali_executor_schedule(void)
+{
+ u32 i;
+ u32 num_physical_needed = 0;
+ u32 num_physical_to_process = 0;
+ mali_bool trigger_pm_update = MALI_FALSE;
+ mali_bool deactivate_idle_group = MALI_TRUE;
+
+ /* Physical groups + jobs to start in this function */
+ struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+	u32 num_jobs_to_start = 0;
+
+ /* Virtual job to start in this function */
+ struct mali_pp_job *virtual_job_to_start = NULL;
+
+ /* GP job to start in this function */
+ struct mali_gp_job *gp_job_to_start = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (pause_count > 0) {
+ /* Execution is suspended, don't schedule any jobs. */
+ return;
+ }
+
+ /* Lock needed in order to safely handle the job queues */
+ mali_scheduler_lock();
+
+	/* 1. Activate the GP group first if a GP job is queued. */
+ if (EXEC_STATE_INACTIVE == gp_group_state &&
+ 0 < mali_scheduler_job_gp_count()) {
+
+ enum mali_group_state state =
+ mali_group_activate(gp_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set GP group state to idle */
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 2. Prepare as many physical groups as needed/possible */
+
+ num_physical_needed = mali_scheduler_job_physical_head_count();
+
+	/* On Mali-450 platforms, this block is rarely entered. */
+ if (0 < num_physical_needed) {
+
+ if (num_physical_needed <= group_list_idle_count) {
+ /* We have enough groups on idle list already */
+ num_physical_to_process = num_physical_needed;
+ num_physical_needed = 0;
+ } else {
+ /* We need to get a hold of some more groups */
+ num_physical_to_process = group_list_idle_count;
+ num_physical_needed -= group_list_idle_count;
+ }
+
+ if (0 < num_physical_needed) {
+
+ /* 2.1. Activate groups which are inactive */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+ struct mali_group, executor_list) {
+ enum mali_group_state state =
+ mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Move from inactive to idle */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ num_physical_to_process++;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ num_physical_needed--;
+ if (0 == num_physical_needed) {
+ /* We have activated all the groups we need */
+ break;
+ }
+ }
+ }
+
+ if (mali_executor_virtual_group_is_usable()) {
+
+ /*
+ * 2.2. And finally, steal and activate groups
+ * from virtual group if we need even more
+ */
+ while (0 < num_physical_needed) {
+ struct mali_group *group;
+
+ group = mali_group_acquire_group(virtual_group);
+ if (NULL != group) {
+ enum mali_group_state state;
+
+ mali_executor_disable_empty_virtual();
+
+ state = mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Group is ready, add to idle list */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_idle);
+ group_list_idle_count++;
+ num_physical_to_process++;
+ } else {
+ /*
+ * Group is not ready yet,
+ * add to inactive list
+ */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_inactive);
+ group_list_inactive_count++;
+
+ trigger_pm_update = MALI_TRUE;
+ }
+ num_physical_needed--;
+ } else {
+ /*
+ * We could not get enough groups
+ * from the virtual group.
+ */
+ break;
+ }
+ }
+ }
+
+ /* 2.3. Assign physical jobs to groups */
+
+ if (0 < num_physical_to_process) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+ struct mali_group, executor_list) {
+ struct mali_pp_job *job = NULL;
+ u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+ MALI_DEBUG_ASSERT(num_jobs_to_start <
+ MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ MALI_DEBUG_ASSERT(0 <
+ mali_scheduler_job_physical_head_count());
+
+ if (mali_executor_hint_is_enabled(
+ MALI_EXECUTOR_HINT_GP_BOUND)) {
+ if (MALI_TRUE == mali_executor_tackle_gp_bound()) {
+ /*
+ * We're gp bound,
+ * don't start this right now.
+ */
+ deactivate_idle_group = MALI_FALSE;
+ num_physical_to_process = 0;
+ break;
+ }
+ }
+
+ job = mali_scheduler_job_pp_physical_get(
+ &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ /* Put job + group on list of jobs to start later on */
+
+ groups_to_start[num_jobs_to_start] = group;
+ jobs_to_start[num_jobs_to_start] = job;
+ sub_jobs_to_start[num_jobs_to_start] = sub_job;
+ num_jobs_to_start++;
+
+ /* Move group from idle to working */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_working,
+ &group_list_working_count);
+
+ num_physical_to_process--;
+ if (0 == num_physical_to_process) {
+ /* Got all we needed */
+ break;
+ }
+ }
+ }
+ }
+
+ /* 3. Activate virtual group, if needed */
+
+ if (EXEC_STATE_INACTIVE == virtual_group_state &&
+ 0 < mali_scheduler_job_next_is_virtual()) {
+ enum mali_group_state state =
+ mali_group_activate(virtual_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set virtual group state to idle */
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+	/* 4. Trigger a PM update now so groups power up as soon as possible. */
+
+ if (MALI_TRUE == trigger_pm_update) {
+ trigger_pm_update = MALI_FALSE;
+ mali_pm_update_async();
+ }
+
+ /* 5. Deactivate idle pp group */
+
+ if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+ && (!mali_timeline_has_physical_pp_job()))) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* 6. Assign jobs to idle virtual group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == virtual_group_state) {
+ if (0 < mali_scheduler_job_next_is_virtual()) {
+ virtual_job_to_start =
+ mali_scheduler_job_pp_virtual_get();
+ virtual_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_virtual_pp_job()) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+
+ if (mali_group_deactivate(virtual_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 7. Assign job to idle GP group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == gp_group_state) {
+ if (0 < mali_scheduler_job_gp_count()) {
+ gp_job_to_start = mali_scheduler_job_gp_get();
+ gp_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_gp_job()) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ if (mali_group_deactivate(gp_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 8. We no longer need the schedule/queue lock */
+
+ mali_scheduler_unlock();
+
+ /* 9. start jobs */
+
+ if (NULL != virtual_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+ mali_group_start_pp_job(virtual_group,
+ virtual_job_to_start, 0);
+ }
+
+ for (i = 0; i < num_jobs_to_start; i++) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+ groups_to_start[i]));
+ mali_group_start_pp_job(groups_to_start[i],
+ jobs_to_start[i],
+ sub_jobs_to_start[i]);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+ if (NULL != gp_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+ mali_group_start_gp_job(gp_group, gp_job_to_start);
+ }
+
+ /* 10. Trigger any pending PM updates */
+ if (MALI_TRUE == trigger_pm_update) {
+ mali_pm_update_async();
+ }
+}
+
+/* Handler for deferred schedule requests */
+static void mali_executor_wq_schedule(void *arg)
+{
+ MALI_IGNORE(arg);
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ notification = mali_gp_job_get_oom_notification(job);
+
+ /*
+ * Remember the id we send to user space, so we have something to
+ * verify when we get a response
+ */
+ gp_returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = gp_returned_cookie;
+
+ mali_session_send_notification(mali_gp_job_get_session(job),
+ notification);
+}
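+
+#if 0
+/*
+ * Illustrative sketch only, not part of the driver: the OOM round trip
+ * built around mali_executor_send_gp_oom_to_user() above. The executor
+ * stashes the job id in gp_returned_cookie before notifying user space,
+ * and _mali_ukk_gp_suspend_response() later compares the cookie sent
+ * back against the still-running job before resuming or aborting. A
+ * minimal model of that check, using a hypothetical job struct:
+ */
+struct example_job {
+	u32 id;
+};
+
+static mali_bool example_cookie_matches(struct example_job *running,
+					u32 returned_cookie)
+{
+	/* Only act if the job that raised OOM is still the one running */
+	return (NULL != running && returned_cookie == running->id) ?
+	       MALI_TRUE : MALI_FALSE;
+}
+#endif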
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs)
+{
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extracts the needed HW status from core and reset */
+ job = mali_group_complete_gp(group, success);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ gp_group_state = EXEC_STATE_IDLE;
+
+ if (release_jobs) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job */
+ mali_gp_job_signal_pp_tracker(job, success);
+ }
+
+ return job;
+}
+
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs)
+{
+ struct mali_pp_job *job;
+ u32 sub_job;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extracts the needed HW status from core and reset */
+ job = mali_group_complete_pp(group, success, &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ if (mali_group_is_virtual(group)) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ /* Move from working to idle state */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_working,
+ &group_list_working_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+	/* By this point the executor module owns the job itself */
+ mali_pp_job_mark_sub_job_completed(job, success);
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done && release_jobs) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ return job;
+}
+
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done)
+{
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ mali_bool pp_job_is_done = MALI_TRUE;
+
+ if (NULL != gp_core) {
+ gp_job = mali_executor_complete_gp(group,
+ success, release_jobs);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(pp_core);
+ MALI_IGNORE(pp_core);
+ pp_job = mali_executor_complete_pp(group,
+ success, release_jobs);
+
+ pp_job_is_done = mali_pp_job_is_complete(pp_job);
+ }
+
+ if (pause_count > 0) {
+ /* Execution has been suspended */
+
+ if (!mali_executor_is_working()) {
+ /* Last job completed, wake up sleepers */
+ _mali_osk_wait_queue_wake_up(
+ executor_working_wait_queue);
+ }
+ } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+ mali_executor_core_scale_in_group_complete(group);
+
+ mali_executor_schedule();
+ } else {
+ /* try to schedule new jobs */
+ mali_executor_schedule();
+ }
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+ *gp_job_done = gp_job;
+ } else if (pp_job_is_done) {
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+ *pp_job_done = pp_job;
+ }
+}
+
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ /*
+ * It's a bit more complicated to change the state for the physical PP
+ * groups since their state is determined by the list they are on.
+ */
+#if defined(DEBUG)
+ mali_bool found = MALI_FALSE;
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ u32 old_counted = 0;
+ u32 new_counted = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(old_list);
+ MALI_DEBUG_ASSERT_POINTER(old_count);
+ MALI_DEBUG_ASSERT_POINTER(new_list);
+ MALI_DEBUG_ASSERT_POINTER(new_count);
+
+ /*
+ * Verify that group is present on old list,
+ * and that the count is correct
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+ struct mali_group, executor_list) {
+ old_counted++;
+ if (group == group_iter) {
+ found = MALI_TRUE;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+ struct mali_group, executor_list) {
+ new_counted++;
+ }
+
+ if (MALI_FALSE == found) {
+ if (old_list == &group_list_idle) {
+ MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+ } else if (old_list == &group_list_inactive) {
+ MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+ } else if (old_list == &group_list_working) {
+ MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+ } else if (old_list == &group_list_disabled) {
+			MALI_DEBUG_PRINT(1, (" old Group list is disabled,"));
+ }
+
+ if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+ MALI_DEBUG_PRINT(1, (" group in working \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+ MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+ MALI_DEBUG_PRINT(1, (" group in idle \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+ MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+ }
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == found);
+ MALI_DEBUG_ASSERT(0 < (*old_count));
+ MALI_DEBUG_ASSERT((*old_count) == old_counted);
+ MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+ _mali_osk_list_move(&group->executor_list, new_list);
+ (*old_count)--;
+ (*new_count)++;
+}
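+
+#if 0
+/*
+ * Illustrative sketch only, not part of the driver: physical PP group
+ * state is encoded by which list the group sits on, so a state change
+ * is just a list move plus counter bookkeeping, as done above. A
+ * minimal model using the same list API:
+ */
+static void example_move_between_lists(_mali_osk_list_t *node,
+				       _mali_osk_list_t *new_list,
+				       u32 *old_count, u32 *new_count)
+{
+	/* Unlink from the old list and append to the new one in one step */
+	_mali_osk_list_move(node, new_list);
+	(*old_count)--;
+	(*new_count)++;
+}
+#endif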
+
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ _mali_osk_list_add(&group->executor_list, new_list);
+ (*new_count)++;
+}
+
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (gp_group == group) {
+ if (gp_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+ if (virtual_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else {
+ /* Physical PP group */
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ _mali_osk_list_t *list;
+
+ if (EXEC_STATE_DISABLED == state) {
+ list = &group_list_disabled;
+ } else if (EXEC_STATE_INACTIVE == state) {
+ list = &group_list_inactive;
+ } else if (EXEC_STATE_IDLE == state) {
+ list = &group_list_idle;
+ } else {
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+ list = &group_list_working;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+ struct mali_group, executor_list) {
+ if (group_iter == group) {
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ /* group not in correct state */
+ return MALI_FALSE;
+}
+
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ /* Put into inactive state (== "lowest" enabled state) */
+ if (group == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+ gp_group_state = EXEC_STATE_INACTIVE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+
+ ++num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+ /* Move from inactive to idle */
+ if (group == gp_group) {
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+
+ if (mali_executor_has_virtual_group()) {
+ if (mali_executor_physical_rejoin_virtual(group)) {
+ mali_pm_update_async();
+ }
+ }
+ }
+ } else {
+ mali_pm_update_async();
+ }
+}
+
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+ mali_bool working;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+ if (MALI_TRUE == working) {
+		/* Group will be disabled once it completes its current work;
+		 * when the virtual group completes, its child groups are also
+		 * checked for this flag */
+ mali_group_set_disable_request(group, MALI_TRUE);
+ return;
+ }
+
+ /* Put into disabled state */
+ if (group == gp_group) {
+ /* GP group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+ gp_group_state = EXEC_STATE_DISABLED;
+ } else {
+ if (mali_group_is_in_virtual(group)) {
+			/* A child of the virtual group: remove this group from the virtual group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ mali_executor_set_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count);
+
+ mali_group_remove_group(virtual_group, group);
+ mali_executor_disable_empty_virtual();
+ } else {
+ mali_executor_change_group_status_disabled(group);
+ }
+
+ --num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_INACTIVE != group->state) {
+ if (MALI_TRUE == mali_group_deactivate(group)) {
+ mali_pm_update_async();
+ }
+ }
+}
+
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+ mali_bool done = MALI_FALSE;
+
+ if (mali_is_mali450()) {
+ return;
+ }
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+		/* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
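+
+#if 0
+/*
+ * Illustrative sketch only, not part of the driver: the pattern used in
+ * mali_executor_notify_core_change() above. Allocations are done outside
+ * the session lock (they may sleep), then the count is re-checked under
+ * the lock and the whole attempt is retried if sessions appeared in the
+ * meantime. The helpers count_items(), alloc_buffers(), deliver_all(),
+ * free_unused() and take_lock()/release_lock() are hypothetical:
+ */
+static void example_alloc_outside_lock(void)
+{
+	mali_bool done = MALI_FALSE;
+
+	while (!done) {
+		u32 n = count_items();          /* snapshot without the lock */
+		void **bufs = alloc_buffers(n); /* may sleep; no lock held */
+
+		take_lock();
+		if (count_items() <= n) {
+			/* We allocated enough; commit while locked */
+			deliver_all(bufs, n);
+			done = MALI_TRUE;
+		}
+		release_lock();
+
+		/* Free leftovers; if not done, retry with a fresh count */
+		free_unused(bufs, n);
+	}
+}
+#endif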
+
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+ u32 i;
+ u32 num_groups;
+ mali_bool ret = MALI_TRUE;
+
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+ ret = MALI_FALSE;
+ break;
+ }
+ }
+ }
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ if (mali_is_mali450()) {
+ return;
+ }
+
+ _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+ mali_executor_core_scaling_is_done, NULL);
+
+ mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+ u32 i;
+ u32 num_groups;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ group->disable_requested = MALI_FALSE;
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ core_scaling_delay_up_mask[i] = 0;
+ }
+}
+
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+ int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ mali_bool update_global_core_scaling_mask = MALI_FALSE;
+ int i;
+
+ MALI_DEBUG_ASSERT(0 < target_core_nr);
+ MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+ mali_executor_lock();
+
+ if (target_core_nr < num_physical_pp_cores_enabled) {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+ }
+
+	/* When a new core scaling request comes in, drop the not-yet-applied
+	 * part of the previous request. This is safe because everything is
+	 * protected by a single lock (the executor lock). */
+ mali_executor_core_scaling_reset();
+
+ mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+ mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+ MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 > target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_disable_internal(group);
+ target_core_scaling_mask[i]++;
+						if (0 == target_core_scaling_mask[i]) {
+							break;
+						}
+ }
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+		/*
+		 * target_core_scaling_mask[i] greater than 0 means we need
+		 * to enable some PP cores in the domain with index i.
+		 */
+ if (0 < target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (num_physical_pp_cores_enabled >= target_core_nr) {
+ update_global_core_scaling_mask = MALI_TRUE;
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_enable_internal(group);
+ target_core_scaling_mask[i]--;
+
+ if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+	/*
+	 * At this point some PP cores may still not be enabled, because
+	 * cores that need to be disabled first are still in the working state.
+	 */
+ if (update_global_core_scaling_mask) {
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < target_core_scaling_mask[i]) {
+ core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+ }
+ }
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
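+
+#if 0
+/*
+ * Illustrative sketch only, not part of the driver: the per-domain delta
+ * computed in mali_executor_core_scale() above. With hypothetical masks
+ * for four domains, scaling from 3 enabled cores down to 2 might give:
+ *
+ *   current_core_scaling_mask = {  2, 1, 0, 0 }
+ *   target_core_scaling_mask  = {  1, 1, 0, 0 }
+ *   delta (target - current)  = { -1, 0, 0, 0 }
+ *
+ * A negative entry means that many cores must be disabled in that
+ * domain; a positive entry means that many must be enabled.
+ */
+static void example_mask_delta(const int *current_mask,
+			       const int *target_mask,
+			       int *delta, int num_domains)
+{
+	int i;
+
+	for (i = 0; i < num_domains; i++) {
+		delta[i] = target_mask[i] - current_mask[i];
+	}
+}
+#endif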
+
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+ int num_pp_cores_disabled = 0;
+ int num_pp_cores_to_enable = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+ /* Disable child group of virtual group */
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (MALI_TRUE == mali_group_disable_requested(child)) {
+ mali_group_set_disable_request(child, MALI_FALSE);
+ mali_executor_group_disable_internal(child);
+ num_pp_cores_disabled++;
+ }
+ }
+ mali_group_set_disable_request(group, MALI_FALSE);
+ } else {
+ mali_executor_group_disable_internal(group);
+ mali_group_set_disable_request(group, MALI_FALSE);
+ if (NULL != mali_group_get_pp_core(group)) {
+ num_pp_cores_disabled++;
+ }
+ }
+
+ num_pp_cores_to_enable = num_pp_cores_disabled;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < core_scaling_delay_up_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (0 == num_pp_cores_to_enable) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *disabled_group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+ mali_executor_group_enable_internal(disabled_group);
+ core_scaling_delay_up_mask[i]--;
+ num_pp_cores_to_enable--;
+
+ if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+ /* Physical PP group */
+ mali_bool idle;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+ if (MALI_TRUE == idle) {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ }
+}
+
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ if (group_list_idle_count > 0) {
+ if (mali_executor_has_virtual_group()) {
+
+ /* Rejoin virtual group on Mali-450 */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_executor_physical_rejoin_virtual(
+ group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ } else if (deactivate_idle_group) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ /* Deactivate group on Mali-300/400 */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* Move from idle to inactive */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+ }
+ }
+ }
+
+ return trigger_pm_update;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+ MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX 1
+} mali_executor_hint;
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ return mali_executor_hints[hint];
+}
+
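+#if 0
+/*
+ * Illustrative sketch only, not part of this header's API: a platform
+ * layer could mark a GP-bound workload so the executor holds back large
+ * physical PP jobs (see mali_executor_tackle_gp_bound() in the .c file):
+ */
+static void example_mark_gp_bound(mali_bool gp_bound)
+{
+	if (gp_bound) {
+		mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+	} else {
+		mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+	}
+}
+#endif
+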
+#endif /* __MALI_EXECUTOR_H__ */
mali_gp_stop_bus(core);
/* Wait for bus to be stopped */
- for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
break;
}
}
- if (MALI_REG_POLL_COUNT_FAST == i) {
+ if (MALI_REG_POLL_COUNT_SLOW == i) {
MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
if (mali_gp_reset_fail < 65533)
mali_gp_reset_fail++;
}
#endif
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
{
u32 val0 = 0;
u32 val1 = 0;
struct mali_gp_core *mali_gp_get_global_gp_core(void);
+#if MALI_STATE_TRACKING
u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
-/*** Accessor functions ***/
-MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
{
return core->hw_core.description;
}
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+ MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ if (0 == stat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+ return MALI_INTERRUPT_RESULT_OOM;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
}
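/*
 * Editor's sketch (not part of the patch): the decode above is a
 * priority match on the masked interrupt status bits. A standalone
 * demonstration with hypothetical bit positions (the real
 * MALIGP2_REG_VAL_* masks live in the register definition headers):
 */
#include <stdint.h>
#include <assert.h>

#define IRQ_VS_END   (1u << 0)	/* hypothetical bit positions */
#define IRQ_PLBU_END (1u << 1)
#define IRQ_OOM      (1u << 2)
#define IRQ_USED     (IRQ_VS_END | IRQ_PLBU_END | IRQ_OOM)

enum result { NONE, SUCCESS, SUCCESS_VS, SUCCESS_PLBU, OOM, ERROR };

static enum result decode(uint32_t stat)
{
	uint32_t used = stat & IRQ_USED;

	if (0 == used)
		return NONE;		/* spurious interrupt */
	if ((IRQ_VS_END | IRQ_PLBU_END) == used)
		return SUCCESS;		/* both units finished */
	if (IRQ_VS_END == used)
		return SUCCESS_VS;	/* only the vertex shader finished */
	if (IRQ_PLBU_END == used)
		return SUCCESS_PLBU;	/* only the tiler finished */
	if (IRQ_OOM & used)
		return OOM;		/* PLBU heap exhausted */
	return ERROR;			/* any other combination is an error */
}

int main(void)
{
	assert(NONE == decode(0));
	assert(SUCCESS == decode(IRQ_VS_END | IRQ_PLBU_END));
	assert(SUCCESS_VS == decode(IRQ_VS_END));
	assert(OOM == decode(IRQ_OOM | IRQ_PLBU_END));	/* OOM wins over partial success */
	return 0;
}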
-MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
{
- mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
}
-MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+MALI_STATIC_INLINE mali_bool mali_gp_is_active(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
}
-MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
}
-MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
{
- /* Enable all interrupts, except those specified in irq_exceptions */
- mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
- MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+ /* Enable all interrupts, except those specified in exceptions */
+ u32 value;
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+ /* Enable all used except VS complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ } else {
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+ exceptions);
+ /* Enable all used except PLBU complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ }
+
+ mali_hw_core_register_write(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK,
+ value);
}
MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
{
MALI_DEBUG_ASSERT_POINTER(job);
MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
	/* de-allocate the pre-allocated oom notifications */
	if (NULL != job->oom_notification) {
		_mali_osk_notification_delete(job->oom_notification);
		job->oom_notification = NULL;
	}

	_mali_osk_free(job);
}
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_gp_job *iter;
+ struct mali_gp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_gp_job, list) {
+
+ /* A span is used to handle job ID wrapping. */
+ bool job_is_after = (mali_gp_job_get_id(job) -
+ mali_gp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN;
+
+ if (job_is_after) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
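/*
 * Editor's sketch (not part of the patch): why the unsigned subtraction
 * above survives job ID wrap-around. With a span of half the u32 range
 * (MALI_SCHEDULER_JOB_ID_SPAN is assumed to be defined that way),
 * (a - b) < span is the classic serial-number comparison:
 */
#include <stdint.h>
#include <assert.h>

#define SPAN 0x7fffffffu	/* hypothetical: half the 32-bit ID space */

static int id_is_after(uint32_t a, uint32_t b)
{
	/* Unsigned subtraction is modulo 2^32, so a small forward
	 * distance is preserved across the wrap:
	 * 0x00000002 - 0xfffffffe == 4. */
	return (a - b) < SPAN;
}

int main(void)
{
	assert(id_is_after(10, 5));			/* plain ordering */
	assert(id_is_after(0x00000002u, 0xfffffffeu));	/* across the wrap */
	assert(!id_is_after(5, 10));
	return 0;
}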
+
u32 mali_gp_job_get_gp_counter_src0(void)
{
return gp_counter_src0;
#include "mali_session.h"
#include "mali_timeline.h"
#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
/**
- * The structure represents a GP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the GP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
*/
struct mali_gp_job {
- _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
- struct mali_session_data *session; /**< Session which submitted this job */
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
_mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+
+ /*
+ * These members are used by the executor and/or group,
+ * protected by executor lock
+ */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
- u32 pid; /**< Process ID of submitting process */
- u32 tid; /**< Thread ID of submitting thread */
- _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
- _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
- struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
- struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
};
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->id;
}
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->cache_order;
}
MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.user_job_ptr;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_builder_id;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.flush_id;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->pid;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->tid;
}
MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_registers;
}
MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->session;
}
MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->heap_current_addr;
}
MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->heap_current_addr = heap_addr;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_flag;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_src0;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_src1;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value0;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value1;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.perf_counter_src0 = src;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.perf_counter_src1 = src;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value0 = value;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value1 = value;
}
-/**
- * Returns MALI_TRUE if first job is after the second job, ordered by job ID.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
{
- /* A span is used to handle job ID wrapping. */
- return (mali_gp_job_get_id(first) - mali_gp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
}
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+ struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+ struct mali_gp_job *job,
+ _mali_osk_notification_t *notification)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+ job->oom_notification = notification;
+}
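/*
 * Editor's sketch (not part of the patch): the two getters above use a
 * "steal the pointer" idiom so that exactly one owner is ever
 * responsible for deleting a notification. A minimal standalone form of
 * the pattern (struct box and box_take are illustrative names):
 */
#include <stddef.h>

struct box {
	void *payload;	/* owned by the box until taken */
};

/* Transfer ownership to the caller; the box no longer owns it. */
static void *box_take(struct box *b)
{
	void *p = b->payload;

	b->payload = NULL;
	return p;
}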
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
/**
* Release reference on tracker for PP job that depends on this GP job.
*
#include "mali_osk_profiling.h"
#include "mali_pm_domain.h"
#include "mali_pm.h"
+#include "mali_executor.h"
+#include <mali_platform.h>
+
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
#include <linux/sched.h>
#include <trace/events/gpu.h>
#endif
-#include <mali_platform.h>
-
-static void mali_group_bottom_half_mmu(void *data);
-static void mali_group_bottom_half_gp(void *data);
-static void mali_group_bottom_half_pp(void *data);
-
-static void mali_group_timeout(void *data);
-static void mali_group_reset_pp(struct mali_group *group);
-static void mali_group_reset_mmu(struct mali_group *group);
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
#if defined(CONFIG_MALI400_PROFILING)
static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
-/*
- * The group object is the most important object in the device driver,
- * and acts as the center of many HW operations.
- * The reason for this is that operations on the MMU will affect all
- * cores connected to this MMU (a group is defined by the MMU and the
- * cores which are connected to this).
- * The group lock is thus the most important lock, followed by the
- * GP and PP scheduler locks. They must be taken in the following
- * order:
- * GP/PP lock first, then group lock(s).
- */
-
static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
static u32 mali_global_num_groups = 0;
-/* timer related */
+/* SW timer for job execution */
int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
/* local helper functions */
-static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_recovery_reset(struct mali_group *group);
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
-
-static void mali_group_post_process_job_pp(struct mali_group *group);
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
-
-void mali_group_lock(struct mali_group *group)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(group->lock);
-#else
- _mali_osk_spinlock_lock(group->lock);
-#endif
- MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
-}
-
-void mali_group_unlock(struct mali_group *group)
-{
- MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(group->lock);
-#else
- _mali_osk_spinlock_unlock(group->lock);
-#endif
-}
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-}
-#endif
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
-struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index)
{
struct mali_group *group = NULL;
group = _mali_osk_calloc(1, sizeof(struct mali_group));
if (NULL != group) {
group->timeout_timer = _mali_osk_timer_init();
-
if (NULL != group->timeout_timer) {
- _mali_osk_lock_order_t order;
_mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
- if (NULL != dlbu) {
- order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
- } else {
- order = _MALI_OSK_LOCK_ORDER_GROUP;
- }
+ group->l2_cache_core[0] = core;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->executor_list);
+ _mali_osk_list_init(&group->pm_domain_list);
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
-#ifdef MALI_UPPER_HALF_SCHEDULING
- group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#else
- group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#endif
+ /* register this object as a part of the correct power domain */
+ if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+ group->pm_domain = mali_pm_register_group(domain_index, group);
- if (NULL != group->lock) {
- group->l2_cache_core[0] = core;
- group->session = NULL;
- group->power_is_on = MALI_TRUE;
- group->state = MALI_GROUP_STATE_IDLE;
- _mali_osk_list_init(&group->group_list);
- _mali_osk_list_init(&group->pp_scheduler_list);
- group->parent_group = NULL;
- group->l2_cache_core_ref_count[0] = 0;
- group->l2_cache_core_ref_count[1] = 0;
- group->bcast_core = bcast;
- group->dlbu_core = dlbu;
-
- mali_global_groups[mali_global_num_groups] = group;
- mali_global_num_groups++;
-
- return group;
- }
- _mali_osk_timer_term(group->timeout_timer);
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
}
_mali_osk_free(group);
}
return NULL;
}
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+ mali_group_core_description(group)));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core) {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ if (mali_global_groups[i] == group) {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups) {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+ _mali_osk_free(group);
+}
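/*
 * Editor's sketch (not part of the patch): the removal from
 * mali_global_groups above is the classic "swap with last" O(1) delete
 * for an unordered array. Standalone form:
 */
#include <stdint.h>
#include <assert.h>

static void swap_remove(uint32_t *arr, uint32_t *count, uint32_t idx)
{
	assert(idx < *count);

	(*count)--;
	if (idx != *count) {
		/* Close the gap with the former last element;
		 * element order is not preserved. */
		arr[idx] = arr[*count];
	}
}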
+
_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
{
/* This group object now owns the MMU core object */
}
}
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+enum mali_group_state mali_group_activate(struct mali_group *group)
{
- group->pm_domain = domain;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+ mali_group_core_description(group)));
+
+ if (MALI_GROUP_STATE_INACTIVE == group->state) {
+ /* Group is inactive, get PM refs in order to power up */
+
+ /*
+		 * We'll take a maximum of 2 power domain references per group:
+		 * one for the group itself, and one for its L2 cache.
+ */
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool all_groups_on;
+
+ /* Deal with child groups first */
+ if (mali_group_is_virtual(group)) {
+ /*
+ * The virtual group might have 0, 1 or 2 L2s in
+ * its l2_cache_core array, but we ignore these and
+ * let the child groups take the needed L2 cache ref
+ * on behalf of the virtual group.
+			 * In other words: the L2 refs are taken in pairs with
+			 * the physical groups that the L2s are attached to.
+ */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /*
+ * Child group is inactive, get PM
+ * refs in order to power up.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+ == child->state);
+
+ child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(
+ child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ groups[num_domains] = child;
+ num_domains++;
+
+ /*
+ * Take L2 domain ref for child group.
+ */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+ > num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL ==
+ child->l2_cache_core[1]);
+ num_domains++;
+ }
+ } else {
+ /* Take L2 domain ref for physical groups. */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+		/* Do the group itself last (its dependencies first) */
+
+ group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ groups[num_domains] = group;
+ num_domains++;
+
+ all_groups_on = mali_pm_get_domain_refs(domains, groups,
+ num_domains);
+
+ /*
+		 * Complete the activation for the group, whether it is
+		 * a virtual or a physical group.
+ */
+ if (MALI_TRUE == all_groups_on) {
+ mali_group_set_active(group);
+ }
+ } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+ /* Already active */
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ } else {
+ /*
+		 * Activation is already pending; group->power_is_on could
+		 * be either true or false. We need to wait for the power-up
+		 * notification anyway.
+ */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+ == group->state);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+ mali_group_core_description(group),
+ MALI_GROUP_STATE_ACTIVE == group->state ?
+ "ACTIVE" : "PENDING"));
+
+ return group->state;
}
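/*
 * Editor's sketch (not part of the patch): activation batches every
 * needed PM domain reference into fixed-size arrays and takes them in a
 * single mali_pm_get_domain_refs() call, so power-up ordering is
 * resolved in one place. The shape of the pattern, with driver details
 * elided (l2_domain_of() is shorthand for mali_l2_cache_get_pm_domain();
 * a NULL groups[] entry marks a reference not tied to a group):
 *
 *	struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
 *	struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
 *	u32 n = 0;
 *
 *	domains[n] = child->pm_domain;    groups[n++] = child;
 *	domains[n] = l2_domain_of(child); groups[n++] = NULL;
 *	domains[n] = group->pm_domain;    groups[n++] = group;
 *
 *	if (mali_pm_get_domain_refs(domains, groups, n))
 *		mali_group_set_active(group);	(all domains already on)
 */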
-void mali_group_delete(struct mali_group *group)
+mali_bool mali_group_set_active(struct mali_group *group)
{
- u32 i;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
- MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+ MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+ mali_group_core_description(group)));
- MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
- /* Delete the resources that this group owns */
- if (NULL != group->gp_core) {
- mali_gp_delete(group->gp_core);
- }
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ if (MALI_TRUE != child->power_is_on) {
+ return MALI_FALSE;
+ }
- if (NULL != group->pp_core) {
- mali_pp_delete(group->pp_core);
- }
+ child->state = MALI_GROUP_STATE_ACTIVE;
+ }
- if (NULL != group->mmu) {
- mali_mmu_delete(group->mmu);
+ mali_group_reset(group);
}
+ /* Go to ACTIVE state */
+ group->state = MALI_GROUP_STATE_ACTIVE;
+
+ return MALI_TRUE;
+}
+
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool power_down = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+ mali_group_core_description(group)));
+
+ group->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ num_domains++;
+
if (mali_group_is_virtual(group)) {
- /* Remove all groups from virtual group */
+ /* Release refs for all child groups */
struct mali_group *child;
struct mali_group *temp;
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- child->parent_group = NULL;
- mali_group_delete(child);
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ child->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ num_domains++;
+
+ /* Release L2 cache domain for child groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+ num_domains++;
}
- mali_dlbu_delete(group->dlbu_core);
+ /*
+		 * Must do the mali_group_power_down() steps right here for a
+		 * virtual group, because the virtual group itself is likely
+		 * to stay powered on while its child groups are now very
+		 * likely to be powered off (and thus lose their state).
+ */
- if (NULL != group->bcast_core) {
- mali_bcast_unit_delete(group->bcast_core);
- }
+ mali_group_clear_session(group);
+ /*
+		 * Disable the broadcast unit (clear its mask).
+		 * This is needed in case the GPU isn't actually
+		 * powered down at this point and groups are
+		 * removed from an inactive virtual group.
+		 * Otherwise, the broadcast unit would intercept
+		 * their interrupts!
+ */
+ mali_bcast_disable(group->bcast_core);
+ } else {
+ /* Release L2 cache domain for physical groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
}
- for (i = 0; i < mali_global_num_groups; i++) {
- if (mali_global_groups[i] == group) {
- mali_global_groups[i] = NULL;
- mali_global_num_groups--;
+ power_down = mali_pm_put_domain_refs(domains, num_domains);
- if (i != mali_global_num_groups) {
- /* We removed a group from the middle of the array -- move the last
- * group to the current position to close the gap */
- mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
- mali_global_groups[mali_global_num_groups] = NULL;
- }
+ return power_down;
+}
- break;
- }
- }
+void mali_group_power_up(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- if (NULL != group->timeout_timer) {
- _mali_osk_timer_del(group->timeout_timer);
- _mali_osk_timer_term(group->timeout_timer);
- }
+ MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+ mali_group_core_description(group)));
- if (NULL != group->bottom_half_work_mmu) {
- _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
- }
+ group->power_is_on = MALI_TRUE;
- if (NULL != group->bottom_half_work_gp) {
- _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ if (MALI_FALSE == mali_group_is_virtual(group)
+ && MALI_FALSE == mali_group_is_in_virtual(group)) {
+ mali_group_reset(group);
}
- if (NULL != group->bottom_half_work_pp) {
- _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ /*
+	 * If a physical group has just been acquired from this virtual
+	 * group, its bcast & dlbu masks must be dropped from the virtual
+	 * group: reset the bcast and dlbu cores here, even though some of
+	 * the PP cores in the virtual group may not be powered on.
+ */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ mali_bcast_reset(group->bcast_core);
+ mali_dlbu_update_mask(group->dlbu_core);
}
+}
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(group->lock);
-#else
- _mali_osk_spinlock_term(group->lock);
-#endif
- _mali_osk_free(group);
+void mali_group_power_down(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+ mali_group_core_description(group)));
+
+ group->power_is_on = MALI_FALSE;
+
+ if (mali_group_is_virtual(group)) {
+ /*
+		 * What we do for physical groups in this function should
+		 * already have been done in mali_group_deactivate()
+		 * for the virtual group.
+ */
+ MALI_DEBUG_ASSERT(NULL == group->session);
+ } else {
+ mali_group_clear_session(group);
+ }
}
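/*
 * Editor's sketch (not part of the patch): the group state machine
 * implied by the asserts in the activate/deactivate/power_up/power_down
 * functions above:
 *
 *	INACTIVE --activate()--> ACTIVATION_PENDING --set_active()--> ACTIVE
 *	    ^                                                            |
 *	    +---------------------- deactivate() -----------------------+
 *
 * power_is_on tracks the actual power rail, while state tracks the
 * executor's intent; in ACTIVATION_PENDING, power_is_on may be either
 * value until the power-up notification arrives.
 */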
MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
struct mali_group *group;
struct mali_group *temp;
- MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+ MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+ mali_group_core_description(vgroup),
+ vgroup));
MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
i = 0;
_MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
- MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+ MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+ i, mali_group_core_description(group),
+ group, group->l2_cache_core[0]));
i++;
}
})
/**
* @brief Add child group to virtual group parent
- *
- * Before calling this function, child must have it's state set to JOINING_VIRTUAL
- * to ensure it's not touched during the transition period. When this function returns,
- * child's state will be IN_VIRTUAL.
*/
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
{
mali_bool found;
u32 i;
- struct mali_session_data *child_session;
-
- MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
- MALI_ASSERT_GROUP_LOCKED(parent);
+ MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
MALI_DEBUG_ASSERT(NULL == child->parent_group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
_mali_osk_list_addtail(&child->group_list, &parent->group_list);
- child->state = MALI_GROUP_STATE_IN_VIRTUAL;
child->parent_group = parent;
MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
mali_bcast_add_group(parent->bcast_core, child);
mali_dlbu_add_group(parent->dlbu_core, child);
- child_session = child->session;
- child->session = NULL;
-
- /* Above this comment, only software state is updated and the HW is not
- * touched. Now, check if Mali is powered and skip the rest if it isn't
- * powered.
- */
-
- if (!update_hw) {
- MALI_DEBUG_CODE(mali_group_print_virtual(parent));
- return;
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
}
- /* Update MMU */
- if (parent->session == child_session) {
- mali_mmu_zap_tlb(child->mmu);
- } else {
+ if (MALI_TRUE == child->power_is_on) {
if (NULL == parent->session) {
- mali_mmu_activate_empty_page_directory(child->mmu);
+ if (NULL != child->session) {
+ /*
+ * Parent has no session, so clear
+ * child session as well.
+ */
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ }
} else {
- mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ if (parent->session == child->session) {
+				/* The child already has the same session as
+				 * the parent, so a simple zap should be enough.
+ */
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+ /*
+				 * The parent has a different session, so we
+				 * must switch to that session's page directory.
+ */
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+
+ /* It is the parent which keeps the session from now on */
+ child->session = NULL;
}
+ } else {
+ /* should have been cleared when child was powered down */
+ MALI_DEBUG_ASSERT(NULL == child->session);
}
- mali_dlbu_update_mask(parent->dlbu_core);
-
/* Start job on child when parent is active */
if (NULL != parent->pp_running_job) {
struct mali_pp_job *job = parent->pp_running_job;
- u32 subjob = -1;
- if (mali_pp_job_is_with_dlbu(parent->pp_running_job)) {
- subjob = mali_pp_core_get_id(child->pp_core);
- }
+ MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
+ child, mali_pp_job_get_id(job), parent));
- /* Take the next unstarted sub job directly without scheduler lock should be
- * safe here. Because: 1) Virtual group is the only consumer of this job.
- * 2) Taking next unstarted sub job doesn't do any change to the job queue itself
- */
- if (mali_pp_job_has_unstarted_sub_jobs(job)) {
- subjob = mali_pp_job_get_first_unstarted_sub_job(job);
- mali_pp_job_mark_sub_job_started(job, subjob);
- }
+	/* Only allowed to add an active child to an active parent */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
- if (-1 != subjob) {
- MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
- child, mali_pp_job_get_id(job), parent));
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
- /* Reset broadcast unit only when it will help run subjob */
- mali_bcast_reset(parent->bcast_core);
+ mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
- mali_group_start_job_on_group(child, job, subjob);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
- mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_pp_core_description(child->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
+#endif
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
- mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
- trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
- mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
#endif
- }
}
MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
/**
* @brief Remove child group from virtual group parent
- *
- * After the child is removed, it's state will be LEAVING_VIRTUAL and must be set
- * to IDLE before it can be used.
*/
void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
{
u32 i;
- MALI_ASSERT_GROUP_LOCKED(parent);
-
- MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+ MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
MALI_DEBUG_ASSERT(parent == child->parent_group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
- /* Removing groups while running is not yet supported. */
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
-
- mali_group_lock(child);
/* Update Broadcast Unit and DLBU */
mali_bcast_remove_group(parent->bcast_core, child);
mali_dlbu_remove_group(parent->dlbu_core, child);
- /* Update HW only if power is on */
- if (mali_pm_is_power_on()) {
+ if (MALI_TRUE == parent->power_is_on) {
mali_bcast_reset(parent->bcast_core);
mali_dlbu_update_mask(parent->dlbu_core);
}
- _mali_osk_list_delinit(&child->group_list);
-
child->session = parent->session;
child->parent_group = NULL;
- child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+ _mali_osk_list_delinit(&child->group_list);
+ if (_mali_osk_list_empty(&parent->group_list)) {
+ parent->session = NULL;
+ }
/* Keep track of the L2 cache cores of child groups */
i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
parent->l2_cache_core_ref_count[i]--;
-
if (parent->l2_cache_core_ref_count[i] == 0) {
parent->l2_cache_core[i] = NULL;
}
MALI_DEBUG_CODE(mali_group_print_virtual(parent));
-
- mali_group_unlock(child);
}
struct mali_group *mali_group_acquire_group(struct mali_group *parent)
{
- struct mali_group *child;
-
- MALI_ASSERT_GROUP_LOCKED(parent);
+ struct mali_group *child = NULL;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
- MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
- child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ if (!_mali_osk_list_empty(&parent->group_list)) {
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ mali_group_remove_group(parent, child);
+ }
- mali_group_remove_group(parent, child);
+ if (NULL != child) {
+ if (MALI_GROUP_STATE_ACTIVE != parent->state
+ && MALI_TRUE == child->power_is_on) {
+ mali_group_reset(child);
+ }
+ }
return child;
}
void mali_group_reset(struct mali_group *group)
{
- /*
- * This function should not be used to abort jobs,
- * currently only called during insmod and PM resume
- */
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->session);
- group->session = NULL;
+ MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+ mali_group_core_description(group)));
if (NULL != group->dlbu_core) {
mali_dlbu_reset(group->dlbu_core);
mali_bcast_reset(group->bcast_core);
}
- if (NULL != group->mmu) {
- mali_group_reset_mmu(group);
- }
+ MALI_DEBUG_ASSERT(NULL != group->mmu);
+ mali_group_reset_mmu(group);
if (NULL != group->gp_core) {
+ MALI_DEBUG_ASSERT(NULL == group->pp_core);
mali_gp_reset(group->gp_core);
- }
-
- if (NULL != group->pp_core) {
- mali_group_reset_pp(group);
- }
-}
-
-/* This function is called before running a job on virtual group
- * Remove some child group from the bcast mask necessarily
- * Set child groups particular registers respectively etc
- */
-static void mali_group_job_prepare_virtual(struct mali_group *group, struct mali_pp_job *job,
- u32 first_subjob, u32 last_subjob)
-{
- struct mali_group *child;
- struct mali_group *temp;
- u32 subjob = first_subjob;
-
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
- MALI_ASSERT_GROUP_LOCKED(group);
-
- MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
-
- /* Set each core specific registers:
- * 1. Renderer List Address
- * 2. Fragment Shader Stack Address
- * Other general registers are set through Broadcast Unit in one go.
- * Note: for Addtional temporary unused group core in virtual group
- * we need to remove it from Broadcast Unit before start the job in
- * this virtual group, otherwise, we may never get Frame_end interrupt.
- */
- if (!mali_pp_job_is_with_dlbu(job)) {
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- if (subjob <= last_subjob) {
- /* Write specific Renderer List Address for each group */
- mali_pp_write_addr_renderer_list(child->pp_core, job, subjob);
- /* Write specific stack address for each child group */
- mali_pp_write_addr_stack(child->pp_core, job, subjob);
- subjob++;
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual group job %u (0x%08X) part %u/%u started.\n",
- mali_pp_job_get_id(job), job, subjob,
- mali_pp_job_get_sub_job_count(job)));
- } else {
- /* Some physical group are just redundant for this run
- * remove it from broadcast
- */
- mali_bcast_remove_group(group->bcast_core, child);
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %p remove from bcast_core\n", child));
- }
- }
-
- /* Reset broadcast */
- mali_bcast_reset(group->bcast_core);
} else {
- /* Write stack address for each child group */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- mali_pp_write_addr_stack(child->pp_core, job, child->pp_core->core_id);
- mali_bcast_add_group(group->bcast_core, child);
- }
-
- /* Reset broadcast */
- mali_bcast_reset(group->bcast_core);
-
- mali_dlbu_config_job(group->dlbu_core, job);
-
- /* Write Renderer List Address for each child group */
- mali_pp_write_addr_renderer_list(group->pp_core, job, 0);
-
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
- mali_pp_job_get_id(job), job, 1,
- mali_pp_job_get_sub_job_count(job)));
- }
-}
-
-/* Call this function to make sure group->group_list are consistent with the group->broad_core mask */
-void mali_group_non_dlbu_job_done_virtual(struct mali_group *group)
-{
- struct mali_group *child, *temp;
-
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
-
- _MALI_OSK_LIST_FOREACHENTRY(child, temp,
- &group->group_list, struct mali_group, group_list) {
- mali_bcast_add_group(group->bcast_core, child);
+ MALI_DEBUG_ASSERT(NULL != group->pp_core);
+ mali_group_reset_pp(group);
}
-
- MALI_DEBUG_PRINT(3, ("Mali group: New physical groups added in virtual group at non dlbu job done"));
- /**
- * When new physical groups added in the virtual groups, they may have different
- * page directory with the virtual group. Here just activate the empty page directory
- * for the virtual group to avoid potential inconsistent page directory.
- */
- mali_mmu_activate_empty_page_directory(group->mmu);
- group->session = NULL;
-}
-
-struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
-{
- return group->gp_core;
-}
-
-struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
-{
- return group->pp_core;
}
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
{
struct mali_session_data *session;
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+ job,
+ mali_group_core_description(group)));
session = mali_gp_job_get_session(job);
- if (NULL != group->l2_cache_core[0]) {
- mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
- }
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
mali_group_activate_page_directory(group, session);
#if defined(CONFIG_MALI400_PROFILING)
if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
- mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+ trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+ sched_clock(), mali_gp_job_get_tid(job),
+ 0, mali_gp_job_get_id(job));
#endif
group->gp_running_job = job;
- group->state = MALI_GROUP_STATE_WORKING;
+ group->is_working = MALI_TRUE;
- /* Setup the timeout timer value and save the job id for the job running on the gp core */
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+ job,
+ mali_group_core_description(group),
+ group->start_time));
}
/* Used to set all the registers except frame renderer list address and fragment shader stack address
* It means the caller must set these two registers properly before calling this function
*/
-static void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
struct mali_session_data *session;
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group)));
session = mali_pp_job_get_session(job);
mali_group_activate_page_directory(group, session);
if (mali_group_is_virtual(group)) {
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 core_num = 0;
+
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
- /* Try to use DMA unit to start job, fallback to writing directly to the core */
- MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
- if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
- mali_pp_job_start(group->pp_core, job, sub_job);
+ /* Configure DLBU for the job */
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job);
+ core_num++;
}
+
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
} else {
- mali_pp_job_start(group->pp_core, job, sub_job);
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
}
/* if the group is virtual, loop through physical groups which belong to this group
mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
#endif
}
+
#if defined(CONFIG_MALI400_PROFILING)
if (0 != group->l2_cache_core_ref_count[0]) {
if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
}
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
} else { /* group is physical - call profiling events for physical cores */
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
}
+
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+ trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
#endif
+
group->pp_running_job = job;
group->pp_running_sub_job = sub_job;
- group->state = MALI_GROUP_STATE_WORKING;
+ group->is_working = MALI_TRUE;
- /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
-}
-
-void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job,
- u32 first_subjob, u32 last_subjob)
-{
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
- MALI_ASSERT_GROUP_LOCKED(group);
-
- MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
-
- /* Prepare the group for running this job */
- mali_group_job_prepare_virtual(group, job, first_subjob, last_subjob);
-
- /* Start job. General setting for all the PP cores */
- mali_group_start_pp_job(group, job, first_subjob);
-}
-
-void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
- MALI_DEBUG_ASSERT_POINTER(job);
-
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state || MALI_GROUP_STATE_IN_VIRTUAL == group->state);
-
- /*
- * There are two frame registers which are different for each sub job:
- * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
- * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
- */
- mali_pp_write_addr_renderer_list(group->pp_core, job, subjob);
-
- /* Write specific stack address for each child group */
- mali_pp_write_addr_stack(group->pp_core, job, subjob);
- /* For start a job in a group which is just joining the virtual group
- * just start the job directly, all the accouting information and state
- * updates have been covered by virtual group state
- */
- if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
- mali_pp_job_start(group->pp_core, job, subjob);
- return;
- }
+ MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group),
+ group->start_time));
- /* Start job. General setting for all the PP cores */
- mali_group_start_pp_job(group, job, subjob);
}
-
-
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (group->state != MALI_GROUP_STATE_OOM ||
- mali_gp_job_get_id(group->gp_running_job) != job_id) {
- return NULL; /* Illegal request or job has already been aborted */
- }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- if (NULL != group->l2_cache_core[0]) {
- mali_l2_cache_invalidate(group->l2_cache_core[0]);
- }
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
mali_mmu_zap_tlb_without_stall(group->mmu);
mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
#endif
-
- group->state = MALI_GROUP_STATE_WORKING;
-
- return group->gp_running_job;
}
static void mali_group_reset_mmu(struct mali_group *group)
struct mali_group *temp;
_mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
if (!mali_group_is_virtual(group)) {
/* This is a physical group or an idle virtual group -- simply wait for
* the reset to complete. */
err = mali_mmu_reset(group->mmu);
MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
} else { /* virtual group */
- err = mali_mmu_reset(group->mmu);
- if (_MALI_OSK_ERR_OK == err) {
- return;
- }
-
/* Loop through all members of this virtual group and wait
* until they are done resetting.
*/
struct mali_group *child;
struct mali_group *temp;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
mali_pp_reset_async(group->pp_core);
if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
/* This is a physical group or an idle virtual group -- simply wait for
* the reset to complete. */
mali_pp_reset_wait(group->pp_core);
- } else { /* virtual group */
+ } else {
/* Loop through all members of this virtual group and wait until they
* are done resetting.
*/
}
}
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
{
struct mali_pp_job *pp_job_to_return;
- u32 pp_sub_job_to_return;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(group->pp_core);
MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_POINTER(sub_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->pp_running_job) {
+
+ /* Deal with HW counters and profiling */
+
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+ }
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ } else {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_pp_core_description(group->pp_core),
+ sched_clock(), 0, 0, 0);
+#endif
- mali_group_post_process_job_pp(group);
+ }
if (success) {
/* Only do soft reset for successful jobs, a full recovery
}
pp_job_to_return = group->pp_running_job;
- pp_sub_job_to_return = group->pp_running_sub_job;
- group->state = MALI_GROUP_STATE_IDLE;
group->pp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+ *sub_job = group->pp_running_sub_job;
if (!success) {
MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
mali_group_recovery_reset(group);
}
- /* Return job to user, schedule and unlock group. */
- mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
+ return pp_job_to_return;
}
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
{
struct mali_gp_job *gp_job_to_return;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(group->gp_core);
MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->gp_running_job) {
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
- mali_group_post_process_job_gp(group, MALI_FALSE);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_gp_core_description(group->gp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ }
if (success) {
/* Only do soft reset for successful jobs, a full recovery
}
gp_job_to_return = group->gp_running_job;
- group->state = MALI_GROUP_STATE_IDLE;
group->gp_running_job = NULL;
+ group->is_working = MALI_FALSE;
if (!success) {
MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
mali_group_recovery_reset(group);
}
- /* Return job to user, schedule and unlock group. */
- mali_gp_scheduler_job_done(group, gp_job_to_return, success);
-}
-
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state ||
- mali_gp_job_get_id(group->gp_running_job) != job_id) {
- return; /* No need to cancel or job has already been aborted or completed */
- }
-
- /* Function will unlock the group, so we need to lock it again */
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- mali_group_lock(group);
-}
-
-static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state ||
- mali_pp_job_get_id(group->pp_running_job) != job_id) {
- return; /* No need to cancel or job has already been aborted or completed */
- }
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
- mali_group_lock(group);
-}
-
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
-{
- struct mali_gp_job *gp_job;
- struct mali_pp_job *pp_job;
- u32 gp_job_id = 0;
- u32 pp_job_id = 0;
- mali_bool abort_pp = MALI_FALSE;
- mali_bool abort_gp = MALI_FALSE;
-
- mali_group_lock(group);
-
- if (mali_group_is_in_virtual(group)) {
- /* Group is member of a virtual group, don't touch it! */
- mali_group_unlock(group);
- return;
- }
-
- gp_job = group->gp_running_job;
- pp_job = group->pp_running_job;
-
- if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
- MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
-
- gp_job_id = mali_gp_job_get_id(gp_job);
- abort_gp = MALI_TRUE;
- }
-
- if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
- MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
-
- pp_job_id = mali_pp_job_get_id(pp_job);
- abort_pp = MALI_TRUE;
- }
-
- if (abort_gp) {
- mali_group_abort_gp_job(group, gp_job_id);
- }
- if (abort_pp) {
- mali_group_abort_pp_job(group, pp_job_id);
- }
-
- mali_group_remove_session_if_unused(group, session);
-
- mali_group_unlock(group);
+ return gp_job_to_return;
}
struct mali_group *mali_group_get_glob_group(u32 index)
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
{
-	MALI_ASSERT_GROUP_LOCKED(group);
-	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+			     mali_session_get_page_directory(session), session,
+			     mali_group_core_description(group)));
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
if (group->session != session) {
/* Different session than last time, so we need to do some work */
- MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+ session, group->session,
+ mali_group_core_description(group)));
mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
group->session = session;
} else {
/* Same session as last time, so no work required */
- MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+ session->page_directory,
+ mali_group_core_description(group)));
mali_mmu_zap_tlb_without_stall(group->mmu);
}
}
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state) {
- if (group->session == session) {
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
- MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
- MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
- mali_mmu_activate_empty_page_directory(group->mmu);
- group->session = NULL;
- }
- }
-}
-
-mali_bool mali_group_power_is_on(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- return group->power_is_on;
-}
-
-void mali_group_power_on_group(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
- || MALI_GROUP_STATE_IN_VIRTUAL == group->state
- || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
- || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
- || MALI_GROUP_STATE_DISABLED == group->state);
-
- MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
-
- group->power_is_on = MALI_TRUE;
-}
-
-void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
- || MALI_GROUP_STATE_IN_VIRTUAL == group->state
- || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
- || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
- || MALI_GROUP_STATE_DISABLED == group->state);
-
- MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
-
- /* It is necessary to set group->session = NULL so that the powered off MMU is not written
- * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
- * pending bottom_halves does not access powered off cores. */
-
- group->session = NULL;
-
- if (do_power_change) {
- group->power_is_on = MALI_FALSE;
- }
-}
-
-void mali_group_power_on(void)
-{
- int i;
- for (i = 0; i < mali_global_num_groups; i++) {
- struct mali_group *group = mali_global_groups[i];
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
- } else {
- mali_group_power_on_group(group);
- }
- mali_group_unlock(group);
- }
- MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
-}
-
-void mali_group_power_off(mali_bool do_power_change)
-{
- int i;
-
- for (i = 0; i < mali_global_num_groups; i++) {
- struct mali_group *group = mali_global_groups[i];
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
- } else {
- mali_group_power_off_group(group, do_power_change);
- }
- mali_group_unlock(group);
- }
- MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
-}
-
static void mali_group_recovery_reset(struct mali_group *group)
{
_mali_osk_errcode_t err;
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
/* Stop cores, bus stop */
if (NULL != group->pp_core) {
u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
{
int n = 0;
+ int i;
+ struct mali_group *child;
+ struct mali_group *temp;
- n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
- n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
- if (group->gp_core) {
- n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
- n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
- }
- if (group->pp_core) {
- n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
- n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
- group->pp_running_job, group->pp_running_sub_job);
+ if (mali_group_is_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP Group: %p\n", group);
+ } else if (mali_group_is_in_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Child PP Group: %p\n", group);
+ } else if (NULL != group->pp_core) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP Group: %p\n", group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP Group: %p\n", group);
}
- return n;
-}
-#endif
-
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (NULL != group->pp_core) {
- struct mali_pp_job *pp_job_to_return;
- u32 pp_sub_job_to_return;
-
- MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
-
- mali_group_post_process_job_pp(group);
-
- pp_job_to_return = group->pp_running_job;
- pp_sub_job_to_return = group->pp_running_sub_job;
- group->state = MALI_GROUP_STATE_IDLE;
- group->pp_running_job = NULL;
-
- mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+ switch (group->state) {
+ case MALI_GROUP_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: INACTIVE\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVATION_PENDING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: ACTIVATION_PENDING\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+					"\tstate: ACTIVE\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: UNKNOWN (%d)\n", group->state);
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
- /* Will unlock group. */
- mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
- } else {
- struct mali_gp_job *gp_job_to_return;
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tSW power: %s\n",
+ group->power_is_on ? "On" : "Off");
- MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
- mali_group_post_process_job_gp(group, MALI_FALSE);
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[i]);
+ n += mali_pm_dump_state_domain(domain,
+ buf + n, size - n);
+ }
+ }
- gp_job_to_return = group->gp_running_job;
- group->state = MALI_GROUP_STATE_IDLE;
- group->gp_running_job = NULL;
+ if (group->gp_core) {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tGP running job: %p\n", group->gp_running_job);
+ }
- mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+ if (group->pp_core) {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPP running job: %p, subjob %d \n",
+ group->pp_running_job,
+ group->pp_running_sub_job);
+ }
- /* Will unlock group. */
- mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ n += mali_group_dump_state(child, buf + n, size - n);
}
+
+ return n;
}
+#endif
+/* Kasin added. */
#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
#include <platform/meson_m400/mali_fix.h>
#define INT_MALI_PP2_MMU ( 6+32)
struct _mali_osk_irq_t_struct;
u32 get_irqnum(struct _mali_osk_irq_t_struct* irq);
#endif
-_mali_osk_errcode_t mali_group_upper_half_mmu(void * data)
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
struct mali_group *group = (struct mali_group *)data;
- struct mali_mmu_core *mmu = group->mmu;
- u32 int_stat;
+ _mali_osk_errcode_t ret;
- MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
if (MALI_FALSE == group->power_is_on)
MALI_SUCCESS;
-	if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU)
+	if (get_irqnum(group->mmu->irq) == INT_MALI_PP2_MMU)
{
+		if (NULL == group->pp_core)
+			MALI_SUCCESS;
if (group->pp_core->core_id == 0) {
if (malifix_get_mmu_int_process_state(0) == MMU_INT_HIT)
malifix_set_mmu_int_process_state(0, MMU_INT_TOP);
MALI_SUCCESS;
}
#endif
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
-#endif
- /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
- int_stat = mali_mmu_get_int_status(mmu);
- if (0 != int_stat) {
- struct mali_group *parent = group->parent_group;
+ ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
- /* page fault or bus error, we thread them both in the same way */
- mali_mmu_mask_all_interrupts(mmu);
- if (NULL == parent) {
- _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
- } else {
- _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
- }
- err = _MALI_OSK_ERR_OK;
- goto out;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
-
- return err;
+ return ret;
}
static void mali_group_bottom_half_mmu(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- struct mali_mmu_core *mmu = group->mmu;
- u32 rawstat;
- MALI_DEBUG_CODE(u32 status);
-
- MALI_DEBUG_ASSERT_POINTER(mmu);
- mali_group_lock(group);
-
- MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
- mali_group_unlock(group);
- return;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
- rawstat = mali_mmu_get_rawstat(mmu);
- MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
+ mali_executor_interrupt_mmu(group, MALI_FALSE);
- MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
-
- if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
- /* An actual page fault has occurred. */
-#ifdef DEBUG
- u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
- MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%08x from bus id %d of type %s on %s\n",
- fault_address,
- (status >> 6) & 0x1F,
- (status & 32) ? "write" : "read",
- mmu->hw_core.description));
- mali_mmu_pagedir_diag(group->session->page_directory, fault_address);
-#endif
-
- mali_group_mmu_page_fault_and_unlock(group);
- return;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-	if (get_irqnum(mmu->irq) == INT_MALI_PP2_MMU)
+	if (get_irqnum(group->mmu->irq) == INT_MALI_PP2_MMU)
}
}
#endif
-
- mali_group_unlock(group);
}
_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
struct mali_group *group = (struct mali_group *)data;
- struct mali_gp_core *core = group->gp_core;
- u32 irq_readout;
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
- }
-#endif
+ _mali_osk_errcode_t ret;
- irq_readout = mali_gp_get_int_stat(core);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
- /* Mask out all IRQs from this core until IRQ is handled */
- mali_gp_mask_all_interrupts(core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_gp_get_rawstat(group->gp_core),
+ mali_group_core_description(group)));
- /* We do need to handle this in a bottom half */
- _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ ret = mali_executor_interrupt_gp(group, MALI_TRUE);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
- return err;
+ return ret;
}
static void mali_group_bottom_half_gp(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- u32 irq_readout;
- u32 irq_errors;
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
-
- mali_group_lock(group);
-
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- irq_readout = mali_gp_read_rawstat(group->gp_core);
-
- MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
-
- if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
- u32 core_status = mali_gp_read_core_status(group->gp_core);
- if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
- MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
- group->core_timed_out = MALI_FALSE;
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
-
- mali_group_complete_gp_and_unlock(group, MALI_TRUE);
- return;
- }
- }
-
- /*
- * Now lets look at the possible error cases (IRQ indicating error or timeout)
- * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
- */
- irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
- if (0 != irq_errors) {
- MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
- group->core_timed_out = MALI_FALSE;
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
-
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- return;
- } else if (group->core_timed_out) { /* SW timeout */
- group->core_timed_out = MALI_FALSE;
- if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
- MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
-
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- return;
- }
- } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
- /* GP wants more memory in order to continue. */
- MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
-
- group->state = MALI_GROUP_STATE_OOM;
- mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
- mali_gp_scheduler_oom(group, group->gp_running_job);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- /*
- * The only way to get here is if we only got one of two needed END_CMD_LST
- * interrupts. Enable all but not the complete interrupt that has been
- * received and continue to run.
- */
- mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
- mali_group_unlock(group);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
-}
-
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
-{
- /* Stop the timeout timer. */
- _mali_osk_timer_del_async(group->timeout_timer);
-
- if (NULL == group->gp_running_job) {
- /* Nothing to do */
- return;
- }
- mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
-
-#if defined(CONFIG_MALI400_PROFILING)
- if (suspend) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
- mali_gp_job_get_perf_counter_value0(group->gp_running_job),
- mali_gp_job_get_perf_counter_value1(group->gp_running_job),
- mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
- 0, 0);
- } else {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
- mali_gp_job_get_perf_counter_value0(group->gp_running_job),
- mali_gp_job_get_perf_counter_value1(group->gp_running_job),
- mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
- 0, 0);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
- mali_group_report_l2_cache_counters_per_core(group, 0);
- }
-#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
-#if defined(CONFIG_MALI400_PROFILING)
- trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
- mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
-#endif
+ mali_executor_interrupt_gp(group, MALI_FALSE);
- mali_gp_job_set_current_heap_addr(group->gp_running_job,
- mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
}
-_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
-{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
- struct mali_group *group = (struct mali_group *)data;
- struct mali_pp_core *core = group->pp_core;
- u32 irq_readout;
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
- }
-#endif
-
- /*
- * For Mali-450 there is one particular case we need to watch out for:
- *
- * Criteria 1) this function call can be due to a shared interrupt,
- * and not necessary because this core signaled an interrupt.
- * Criteria 2) this core is a part of a virtual group, and thus it should
- * not do any post processing.
- * Criteria 3) this core has actually indicated that is has completed by
- * having set raw_stat/int_stat registers to != 0
- *
- * If all this criteria is meet, then we could incorrectly start post
- * processing on the wrong group object (this should only happen on the
- * parent group)
- */
-#if !defined(MALI_UPPER_HALF_SCHEDULING)
- if (mali_group_is_in_virtual(group)) {
- /*
- * This check is done without the group lock held, which could lead to
- * a potential race. This is however ok, since we will safely re-check
- * this with the group lock held at a later stage. This is just an
- * early out which will strongly benefit shared IRQ systems.
- */
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-#endif
-
- irq_readout = mali_pp_get_int_stat(core);
- if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
- /* Mask out all IRQs from this core until IRQ is handled */
- mali_pp_mask_all_interrupts(core);
-
-#if defined(CONFIG_MALI400_PROFILING)
- /* Currently no support for this interrupt event for the virtual PP core */
- if (!mali_group_is_virtual(group)) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
- MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
- irq_readout, 0, 0, 0, 0);
- }
-#endif
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- /* Check if job is complete without errors */
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
- MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
-
- mali_group_lock(group);
-
- /* Check if job is complete without errors, again, after taking the group lock */
- irq_readout = mali_pp_read_rawstat(core);
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-
- if (mali_group_is_virtual(group)) {
- u32 status_readout = mali_pp_read_status(group->pp_core);
- if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
- MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
- }
-
- if (mali_group_is_in_virtual(group)) {
- /* We're member of a virtual group, so interrupt should be handled by the virtual group */
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_FAULT;
- goto out;
- }
-
- group->core_timed_out = MALI_FALSE;
-
- mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
-
- /* No need to enable interrupts again, since the core will be reset while completing the job */
-
- MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-#endif
-
- /* We do need to handle this in a bottom half */
- _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
-
- return err;
-}
#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
int PP0_int_cnt = 0;
int mali_PP0_int_cnt(void)
EXPORT_SYMBOL(mali_PP1_int_cnt);
#endif
-static void mali_group_bottom_half_pp(void *data)
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- struct mali_pp_core *core = group->pp_core;
- u32 irq_readout;
- u32 irq_errors;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
- mali_group_lock(group);
-
- if (mali_group_is_in_virtual(group)) {
- /* We're member of a virtual group, so interrupt should be handled by the virtual group */
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- irq_readout = mali_pp_read_rawstat(group->pp_core);
-
- MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
-
- /* Check if job is complete without errors */
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
- if (mali_group_is_virtual(group)) {
- u32 status_readout = mali_pp_read_status(group->pp_core);
-
- if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
- MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
- }
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_pp_get_rawstat(group->pp_core),
+ mali_group_core_description(group)));
- if (!group->core_timed_out) {
- MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
- group->core_timed_out = MALI_FALSE;
+ ret = mali_executor_interrupt_pp(group, MALI_TRUE);
- mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
- }
-
#if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
-	if (core->core_id == 0)
+	if (group->pp_core->core_id == 0)
PP0_int_cnt++;
PP1_int_cnt++;
#endif
- /*
- * Now lets look at the possible error cases (IRQ indicating error or timeout)
- * END_OF_FRAME and HANG interrupts are not considered error.
- */
- irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
- if (0 != irq_errors) {
- MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
- irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
- group->core_timed_out = MALI_FALSE;
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- } else if (group->core_timed_out) { /* SW timeout */
- group->core_timed_out = MALI_FALSE;
- if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
- MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
- mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
- } else {
- mali_group_unlock(group);
- }
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- /*
- * We should never get here, re-enable interrupts and continue
- */
- if (0 == irq_readout) {
- MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
- mali_pp_get_hw_core_desc(group->pp_core)));
- } else {
- MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
- mali_pp_get_hw_core_desc(group->pp_core)));
- }
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
-
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+
+ return ret;
}
-static void mali_group_post_process_job_pp(struct mali_group *group)
+static void mali_group_bottom_half_pp(void *data)
{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- /* Stop the timeout timer. */
- _mali_osk_timer_del_async(group->timeout_timer);
-
- if (NULL != group->pp_running_job) {
- if (MALI_TRUE == mali_group_is_virtual(group)) {
- struct mali_group *child;
- struct mali_group *temp;
-
- /* update performance counters from each physical pp core within this virtual group */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
- }
-
-#if defined(CONFIG_MALI400_PROFILING)
- /* send profiling data per physical core */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
- mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
- mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
- mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
- 0, 0);
-
- trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
- 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
- mali_pp_job_get_frame_builder_id(group->pp_running_job),
- mali_pp_job_get_flush_id(group->pp_running_job));
- }
- if (0 != group->l2_cache_core_ref_count[0]) {
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
- }
- }
- if (0 != group->l2_cache_core_ref_count[1]) {
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
- }
- }
+ struct mali_group *group = (struct mali_group *)data;
-#endif
- } else {
- /* update performance counters for a physical group's pp core */
- mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
-#if defined(CONFIG_MALI400_PROFILING)
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
- mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
- mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
- mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
- 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
- trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
- mali_pp_job_get_frame_builder_id(group->pp_running_job), mali_pp_job_get_flush_id(group->pp_running_job));
+ mali_executor_interrupt_pp(group, MALI_FALSE);
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
- }
-#endif
- }
- }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
}
static void mali_group_timeout(void *data)
{
struct mali_group *group = (struct mali_group *)data;
+ MALI_DEBUG_ASSERT_POINTER(group);
- group->core_timed_out = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+ mali_group_core_description(group),
+ _mali_osk_time_tickcount()));
if (mali_core_timeout < 65533)
mali_core_timeout++;
if (NULL != group->gp_core) {
- MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
- _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ mali_group_schedule_bottom_half_gp(group);
} else {
- MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
- _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ mali_group_schedule_bottom_half_pp(group);
}
}
-void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session)
{
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- /* Early out - safe even if mutex is not held */
- if (group->session != session) return;
-
- mali_group_lock(group);
-
- mali_group_remove_session_if_unused(group, session);
+ if (group->session != session) {
+ /* not running from this session */
+ return MALI_TRUE; /* success */
+ }
- if (group->session == session) {
+ if (group->is_working) {
/* The Zap also does the stall and disable_stall */
mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
- if (MALI_TRUE != zap_success) {
- MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
-
- mali_group_mmu_page_fault_and_unlock(group);
- return;
- }
+ return zap_success;
+ } else {
+ /* Just remove the session instead of zapping */
+ mali_group_clear_session(group);
+ return MALI_TRUE; /* success */
}
-
- mali_group_unlock(group);
}
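/*
 * Editorial sketch (not part of this patch): with the new mali_bool
 * return value above, the failure fallback moves out to the caller.
 * The real call sites live in the executor, which is elided here;
 * example_zap_or_recover() is a hypothetical name.
 */
static void example_zap_or_recover(struct mali_group *group,
				   struct mali_session_data *session)
{
	if (MALI_FALSE == mali_group_zap_session(group, session)) {
		/* TLB zap failed while the group was working; fall back
		 * to a full recovery reset, much like the old in-function
		 * page fault path used to do. */
		mali_group_recovery_reset(group);
	}
}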
#if defined(CONFIG_MALI400_PROFILING)
u32 value1 = 0;
u32 profiling_channel = 0;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
switch (core_num) {
case 0:
profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
_mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
-
-mali_bool mali_group_is_enabled(struct mali_group *group)
-{
- mali_bool enabled = MALI_TRUE;
-
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- enabled = MALI_FALSE;
- }
- mali_group_unlock(group);
-
- return enabled;
-}
-
-void mali_group_enable(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
- || NULL != mali_group_get_gp_core(group));
-
- if (NULL != mali_group_get_pp_core(group)) {
- mali_pp_scheduler_enable_group(group);
- } else {
- mali_gp_scheduler_enable_group(group);
- }
-}
-
-void mali_group_disable(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
- || NULL != mali_group_get_gp_core(group));
-
- if (NULL != mali_group_get_pp_core(group)) {
- mali_pp_scheduler_disable_group(group);
- } else {
- mali_gp_scheduler_disable_group(group);
- }
-}
-
-static struct mali_pm_domain *mali_group_get_l2_domain(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
-
- /* l2_cache_core[0] stores the related l2 domain */
- return group->l2_cache_core[0]->pm_domain;
-}
-
-void mali_group_get_pm_domain_ref(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- /* Get group used l2 domain ref */
- mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
- /* Get group used core domain ref */
- mali_pm_domain_ref_get(group->pm_domain);
-}
-
-void mali_group_put_pm_domain_ref(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- /* Put group used core domain ref */
- mali_pm_domain_ref_put(group->pm_domain);
- /* Put group used l2 domain ref */
- mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
-}
#ifndef __MALI_GROUP_H__
#define __MALI_GROUP_H__
-#include "linux/jiffies.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_mmu.h"
#include "mali_gp.h"
#include "mali_pp.h"
#include "mali_session.h"
+#include "mali_osk_profiling.h"
/**
* @brief Default max runtime [ms] for a core job - used by timeout timers
*/
-#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
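/*
 * Editorial sketch (not part of this patch): how the runtime limit above
 * is typically turned into a timer deadline. Assumes the standard OSK
 * helpers _mali_osk_time_mstoticks() and _mali_osk_timer_mod() behave as
 * their names suggest; example_arm_timeout() is a hypothetical name.
 */
MALI_STATIC_INLINE void example_arm_timeout(_mali_osk_timer_t *timeout_timer)
{
	/* Convert the millisecond limit to OS ticks and (re)arm the timer. */
	_mali_osk_timer_mod(timeout_timer,
			    _mali_osk_time_mstoticks(mali_max_job_runtime));
}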
-/** @brief A mali group object represents a MMU and a PP and/or a GP core.
- *
- */
#define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
-enum mali_group_core_state {
- MALI_GROUP_STATE_IDLE,
- MALI_GROUP_STATE_WORKING,
- MALI_GROUP_STATE_OOM,
- MALI_GROUP_STATE_IN_VIRTUAL,
- MALI_GROUP_STATE_JOINING_VIRTUAL,
- MALI_GROUP_STATE_LEAVING_VIRTUAL,
- MALI_GROUP_STATE_DISABLED,
+enum mali_group_state {
+ MALI_GROUP_STATE_INACTIVE,
+ MALI_GROUP_STATE_ACTIVATION_PENDING,
+ MALI_GROUP_STATE_ACTIVE,
};
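/*
 * Editorial note: inferred lifecycle of the states above, based on the
 * activate/set_active/deactivate functions declared further down in this
 * header (not an authoritative state chart):
 *
 *   INACTIVE           --mali_group_activate()-->   ACTIVATION_PENDING
 *                                                   (or directly ACTIVE)
 *   ACTIVATION_PENDING --mali_group_set_active()--> ACTIVE
 *   ACTIVE             --mali_group_deactivate()--> INACTIVE
 */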
-/* Forward declaration from mali_pm_domain.h */
-struct mali_pm_domain;
-
/**
* The structure represents a render group
* A render group is defined by all the cores that share the same Mali MMU
struct mali_mmu_core *mmu;
struct mali_session_data *session;
- mali_bool power_is_on;
- enum mali_group_core_state state;
+ enum mali_group_state state;
+ mali_bool power_is_on;
+
+ mali_bool is_working;
+ unsigned long start_time; /* in ticks */
struct mali_gp_core *gp_core;
struct mali_gp_job *gp_running_job;
struct mali_pp_job *pp_running_job;
u32 pp_running_sub_job;
+ struct mali_pm_domain *pm_domain;
+
struct mali_l2_cache_core *l2_cache_core[2];
u32 l2_cache_core_ref_count[2];
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
struct mali_dlbu_core *dlbu_core;
struct mali_bcast_unit *bcast_core;
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_t *lock;
-#else
- _mali_osk_spinlock_t *lock;
-#endif
-
- _mali_osk_list_t pp_scheduler_list;
+ /* Used for working groups which needs to be disabled */
+ mali_bool disable_requested;
- /* List used for virtual groups. For a virtual group, the list represents the
- * head element. */
+ /* Used by group to link child groups (for virtual group) */
_mali_osk_list_t group_list;
- struct mali_group *pm_domain_list;
- struct mali_pm_domain *pm_domain;
+ /* Used by executor module in order to link groups of same state */
+ _mali_osk_list_t executor_list;
- /* Parent virtual group (if any) */
- struct mali_group *parent_group;
+ /* Used by PM domains to link groups of same domain */
+ _mali_osk_list_t pm_domain_list;
_mali_osk_wq_work_t *bottom_half_work_mmu;
_mali_osk_wq_work_t *bottom_half_work_gp;
_mali_osk_wq_work_t *bottom_half_work_pp;
_mali_osk_timer_t *timeout_timer;
- mali_bool core_timed_out;
};
/** @brief Create a new Mali group object
*
- * @param cluster Pointer to the cluster to which the group is connected.
- * @param mmu Pointer to the MMU that defines this group
* @return A pointer to a new group object
*/
struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
struct mali_dlbu_core *dlbu,
- struct mali_bcast_unit *bcast);
+ struct mali_bcast_unit *bcast,
+ u32 domain_index);
+
+void mali_group_delete(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core);
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+ struct mali_mmu_core *mmu_core);
void mali_group_remove_mmu_core(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core);
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+ struct mali_gp_core *gp_core);
void mali_group_remove_gp_core(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core);
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+ struct mali_pp_core *pp_core);
void mali_group_remove_pp_core(struct mali_group *group);
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
-
-void mali_group_delete(struct mali_group *group);
-
-/** @brief Virtual groups */
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
-void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
-struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+MALI_STATIC_INLINE const char *mali_group_core_description(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ if (NULL != group->pp_core) {
+ return mali_pp_core_description(group->pp_core);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ return mali_gp_core_description(group->gp_core);
+ }
+}
MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
#if defined(CONFIG_MALI450)
return (NULL != group->dlbu_core);
#else
#endif
}
-/** @brief Check if a group is considered as part of a virtual group
- *
- * @note A group is considered to be "part of" a virtual group also during the transition
- * in to / out of the virtual group.
+/** @brief Check whether a group is part of a virtual group
*/
MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
#if defined(CONFIG_MALI450)
- return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
- MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
- MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+ return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
#else
return MALI_FALSE;
#endif
/** @brief Reset group
*
- * This function will reset the entire group, including all the cores present in the group.
+ * This function will reset the entire group,
+ * including all the cores present in the group.
*
* @param group Pointer to the group to reset
*/
void mali_group_reset(struct mali_group *group);
-/** @brief Zap MMU TLB on all groups
- *
- * Zap TLB on group if \a session is active.
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (NULL != group->session) {
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children need to be ACTIVE first
*/
-void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session);
+mali_bool mali_group_set_active(struct mali_group *group);
-/** @brief Get pointer to GP core object
+/*
+ * @return MALI_TRUE if one or more domains can now be powered off; the
+ * caller should then call either mali_pm_update_async() or
+ * mali_pm_update_sync() to do so.
*/
-struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group);
+mali_bool mali_group_deactivate(struct mali_group *group);
-/** @brief Get pointer to PP core object
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+ struct mali_group *group, mali_bool disable)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ group->disable_requested = disable;
+
+ /*
+ * When any child group's disable_request is set to TRUE, the
+ * parent group's disable_request must also be set to TRUE.
+ * Conversely, the parent group's disable_request may only be set
+ * back to FALSE once all of its child groups' disable_request
+ * flags are FALSE.
+ */
+ if (NULL != group->parent_group && MALI_TRUE == disable) {
+ group->parent_group->disable_requested = disable;
+ }
+}
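A minimal usage sketch of the propagation rule above (the executor lock helpers named here are assumptions; the asserts only require that the executor lock is held):

	/* Sketch only: disabling a child of a virtual group also marks
	 * the parent as disable-requested. */
	mali_executor_lock();                       /* assumed lock helper */
	mali_group_set_disable_request(child, MALI_TRUE);
	MALI_DEBUG_ASSERT(MALI_TRUE ==
			  mali_group_disable_requested(child->parent_group));
	mali_executor_unlock();                     /* assumed lock helper */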
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
*/
-struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group);
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ if (mali_group_is_in_virtual(group)) {
+ struct mali_group *tmp_group = mali_executor_get_virtual_group();
+ return tmp_group->is_working;
+ }
+ return group->is_working;
+}
-/** @brief Lock group object
- *
- * Most group functions will lock the group object themselves. The expection is
- * the group_bottom_half which requires the group to be locked on entry.
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->gp_running_job;
+}
+
+/** @brief Zap MMU TLB on all groups
*
- * @param group Pointer to group to lock
+ * Zap TLB on group if \a session is active.
*/
-void mali_group_lock(struct mali_group *group);
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session);
-/** @brief Unlock group object
- *
- * @param group Pointer to group to unlock
+/** @brief Get pointer to GP core object
*/
-void mali_group_unlock(struct mali_group *group);
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group);
-#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
-#else
-#define MALI_ASSERT_GROUP_LOCKED(group)
-#endif
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->pp_core;
+}
/** @brief Start GP job
*/
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
/** @brief Start virtual group Job on a virtual group
*/
void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
/** @brief Resume GP job that suspended waiting for more heap memory
*/
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
-/** @brief Abort GP job
- *
- * Used to abort suspended OOM jobs when user space failed to allocte more memory.
- */
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
-/** @brief Abort all GP jobs from \a session
- *
- * Used on session close when terminating all running and queued jobs from \a session.
- */
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
-mali_bool mali_group_power_is_on(struct mali_group *group);
-void mali_group_power_on_group(struct mali_group *group);
-void mali_group_power_off_group(struct mali_group *group, mali_bool power_status);
-void mali_group_power_on(void);
-
-/** @brief Prepare group for power off
- *
- * Update the group's state and prepare for the group to be powered off.
- *
- * If do_power_change is MALI_FALSE group session will be set to NULL so that
- * no more activity will happen to this group, but the power state flag will be
- * left unchanged.
- *
- * @do_power_change MALI_TRUE if power status is to be updated
- */
-void mali_group_power_off(mali_bool do_power_change);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_get_interrupt_result(group->gp_core);
+}
-struct mali_group *mali_group_get_glob_group(u32 index);
-u32 mali_group_get_glob_num_groups(void);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_get_interrupt_result(group->pp_core);
+}
-u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_mmu_get_interrupt_result(group->mmu);
+}
-/* MMU-related functions */
-_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_is_active(group->gp_core);
+}
-/* GP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_is_active(group->pp_core);
+}
-/* PP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+ unsigned long time_cost;
+ struct mali_group *tmp_group = group;
-/** @brief Check if group is enabled
- *
- * @param group group to check
- * @return MALI_TRUE if enabled, MALI_FALSE if not
- */
-mali_bool mali_group_is_enabled(struct mali_group *group);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-/** @brief Enable group
- *
- * An enabled job is put on the idle scheduler list and can be used to handle jobs. Does nothing if
- * group is already enabled.
- *
- * @param group group to enable
- */
-void mali_group_enable(struct mali_group *group);
+ /* If the group is in a virtual group, use the virtual group's start time */
+ if (mali_group_is_in_virtual(group)) {
+ tmp_group = mali_executor_get_virtual_group();
+ }
-/** @brief Disable group
- *
- * A disabled group will no longer be used by the scheduler. If part of a virtual group, the group
- * will be removed before being disabled. Cores part of a disabled group is safe to power down.
- *
- * @param group group to disable
- */
-void mali_group_disable(struct mali_group *group);
+ time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+ if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+ /*
+ * current tick is at or after timeout end time,
+ * so this is a valid timeout
+ */
+ return MALI_TRUE;
+ } else {
+ /*
+ * Not a valid timeout. A HW interrupt probably beat
+ * us to it, and the timer wasn't properly deleted
+ * (async deletion used due to atomic context).
+ */
+ return MALI_FALSE;
+ }
+}
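A worked example of the tick arithmetic above (illustrative 32-bit values; the real counter is whatever _mali_osk_time_tickcount() returns): unsigned subtraction yields the correct elapsed count even when the counter wraps around.

	u32 start = 0xFFFFFF00u; /* sampled shortly before wraparound */
	u32 now   = 0x00000100u; /* sampled shortly after wraparound */
	u32 cost  = now - start; /* 0x200 ticks (modulo 2^32), as intended */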
-MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
{
- mali_bool empty = MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_gp_mask_all_interrupts(group->gp_core);
+}
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_pp_mask_all_interrupts(group->pp_core);
+}
- if (_mali_osk_list_empty(&group->group_list)) {
- group->state = MALI_GROUP_STATE_DISABLED;
- group->session = NULL;
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+ struct mali_group *group,
+ enum mali_interrupt_result exceptions)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
- empty = MALI_TRUE;
- }
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
- return empty;
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
}
-MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
{
- mali_bool empty = MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
- if (_mali_osk_list_empty(&group->group_list)) {
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
- group->state = MALI_GROUP_STATE_IDLE;
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+}
+#endif
- empty = MALI_TRUE;
- }
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
- return empty;
-}
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
-/* Get group used l2 domain and core domain ref */
-void mali_group_get_pm_domain_ref(struct mali_group *group);
-/* Put group used l2 domain and core domain ref */
-void mali_group_put_pm_domain_ref(struct mali_group *group);
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return _mali_osk_list_empty(&group->group_list);
+}
#endif /* __MALI_GROUP_H__ */
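Putting the header together, a minimal usage sketch of the group API (l2_cache, mmu_core and pp_core are assumed to come from resource parsing, as in the init code later in this patch):

	/* Sketch: create a PP group, attach its cores, clean up on failure. */
	struct mali_group *group = mali_group_create(l2_cache, NULL, NULL,
						     MALI_DOMAIN_INDEX_PP0);
	if (NULL != group) {
		if (_MALI_OSK_ERR_OK != mali_group_add_mmu_core(group, mmu_core) ||
		    _MALI_OSK_ERR_OK != mali_group_add_pp_core(group, pp_core)) {
			mali_group_delete(group);
			group = NULL;
		}
	}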
void mali_hw_core_delete(struct mali_hw_core *core)
{
- _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
- core->mapped_registers = NULL;
+ if (NULL != core->mapped_registers) {
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ }
_mali_osk_mem_unreqregion(core->phys_addr, core->size);
}
* This struct is embedded inside all core specific structs.
*/
struct mali_hw_core {
- u32 phys_addr; /**< Physical address of the registers */
+ uintptr_t phys_addr; /**< Physical address of the registers */
u32 phys_offset; /**< Offset from start of Mali to registers */
u32 size; /**< Size of registers */
mali_io_address mapped_registers; /**< Virtual mapping of the registers */
#define MALI_REG_POLL_COUNT_FAST 1000
#define MALI_REG_POLL_COUNT_SLOW 1000000
+/*
+ * GP and PP cores translate their int_stat/rawstat registers into one of these
+ */
+enum mali_interrupt_result {
+ MALI_INTERRUPT_RESULT_NONE,
+ MALI_INTERRUPT_RESULT_SUCCESS,
+ MALI_INTERRUPT_RESULT_SUCCESS_VS,
+ MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+ MALI_INTERRUPT_RESULT_OOM,
+ MALI_INTERRUPT_RESULT_ERROR
+};
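As a hedged sketch of how a core might fold its raw interrupt status into this enum (the GP_IRQ_* masks below are illustrative placeholders, not register definitions from this patch):

	MALI_STATIC_INLINE enum mali_interrupt_result translate_gp_rawstat(u32 rawstat)
	{
		if (0 == rawstat)
			return MALI_INTERRUPT_RESULT_NONE;
		if (rawstat & GP_IRQ_ERROR_BITS)   /* assumed error mask */
			return MALI_INTERRUPT_RESULT_ERROR;
		if (rawstat & GP_IRQ_PLBU_OOM)     /* assumed OOM bit */
			return MALI_INTERRUPT_RESULT_OOM;
		if ((rawstat & GP_IRQ_VS_END) && (rawstat & GP_IRQ_PLBU_END))
			return MALI_INTERRUPT_RESULT_SUCCESS;
		if (rawstat & GP_IRQ_VS_END)
			return MALI_INTERRUPT_RESULT_SUCCESS_VS;
		return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
	}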
+
_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
void mali_hw_core_delete(struct mali_hw_core *core);
}
}
-
MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
{
MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
#include "mali_broadcast.h"
#include "mali_gp.h"
#include "mali_pp.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
#include "mali_pp_job.h"
#include "mali_group.h"
#include "mali_pm.h"
#include "mali_scheduler.h"
#include "mali_kernel_utilization.h"
#include "mali_l2_cache.h"
-#include "mali_dma.h"
#include "mali_timeline.h"
#include "mali_soft_job.h"
#include "mali_pm_domain.h"
#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
#include "mali_profiling_internal.h"
#endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
/* Mali GPU memory. Real values come from module parameter or from device specific data */
unsigned int mali_dedicated_mem_start = 0;
unsigned int mali_dedicated_mem_size = 0;
-unsigned int mali_shared_mem_size = 0;
+
+/* Default shared memory size: 0xffffffff (the largest u32 value, i.e. 4GB). */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
/* Frame buffer memory to be accessible by Mali GPU */
int mali_fb_start = 0;
int mali_inited_pp_cores_group_2 = 0;
static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
-static u32 global_gpu_base_address = 0;
+static uintptr_t global_gpu_base_address = 0;
static u32 global_gpu_major_version = 0;
static u32 global_gpu_minor_version = 0;
static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
global_gpu_base_address = _mali_osk_resource_base_address();
if (0 == global_gpu_base_address) {
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
- return _MALI_OSK_ERR_OK;
+ return err;
}
static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
static _mali_osk_errcode_t mali_parse_product_info(void)
{
- /*
- * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
- * Look at the version register for the first PP core in order to determine the GPU HW revision.
- */
-
- u32 first_pp_offset;
_mali_osk_resource_t first_pp_resource;
- /* Find out where the first PP core is located */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
- /* Mali-300/400/450 */
- first_pp_offset = 0x8000;
- } else {
- /* Mali-200 */
- first_pp_offset = 0x0000;
- }
-
/* Find the first PP core resource (again) */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
/* Create a dummy PP object for this core so that we can read the version register */
- struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
if (NULL != group) {
struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
if (NULL != pp_core) {
- u32 pp_version = mali_pp_core_get_version(pp_core);
+ u32 pp_version;
+
+ pp_version = mali_pp_core_get_version(pp_core);
+
mali_group_delete(group);
global_gpu_major_version = (pp_version >> 8) & 0xFF;
return _MALI_OSK_ERR_FAULT;
}
-
-static void mali_resource_count(u32 *pp_count, u32 *l2_count)
-{
- *pp_count = 0;
- *l2_count = 0;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
- ++(*pp_count);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
- ++(*l2_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
- ++(*l2_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
- ++(*l2_count);
- }
-}
-
static void mali_delete_groups(void)
{
struct mali_group *group;
MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
}
-static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
{
struct mali_l2_cache_core *l2_cache = NULL;
MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
- l2_cache = mali_l2_cache_create(resource);
+ l2_cache = mali_l2_cache_create(resource, domain_index);
if (NULL == l2_cache) {
MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
return NULL;
if (mali_is_mali400()) {
_mali_osk_resource_t l2_resource;
- if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
- l2_cache = mali_create_l2_cache_core(&l2_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
} else if (mali_is_mali450()) {
/*
* L2 for GP at 0x10000
_mali_osk_resource_t l2_pp_grp1_resource;
/* Make cluster for GP's L2 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
- l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
} else {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
/* Find corresponding l2 domain */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
- l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
} else {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
/* Second PP core group is optional, don't fail if we don't find it */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
- l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
}
}
static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
_mali_osk_resource_t *resource_mmu,
_mali_osk_resource_t *resource_gp,
- _mali_osk_resource_t *resource_pp)
+ _mali_osk_resource_t *resource_pp,
+ u32 domain_index)
{
struct mali_mmu_core *mmu;
struct mali_group *group;
MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
/* Create the group object */
- group = mali_group_create(cache, NULL, NULL);
+ group = mali_group_create(cache, NULL, NULL, domain_index);
if (NULL == group) {
MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
return NULL;
}
}
- /* Reset group */
- mali_group_lock(group);
- mali_group_reset(group);
- mali_group_unlock(group);
-
return group;
}
mali_bcast_remove_group(bcast_core, phys_group);
}
#endif /* DEBUG */
- group = mali_group_create(NULL, dlbu_core, bcast_core);
+ group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
if (NULL == group) {
MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
mali_bcast_unit_delete(bcast_core);
cluster_id_pp_grp1 = 2;
}
- resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
- resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
- resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
- resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
- resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
- resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
- resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
- resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
- resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
- resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
- resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
- resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
- resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
- resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
- resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
- resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
- resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
- resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+ resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
if (mali_is_mali450()) {
- resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
- resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
- resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
- resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+ resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
if (_MALI_OSK_ERR_OK != resource_bcast_found ||
_MALI_OSK_ERR_OK != resource_dlbu_found ||
}
MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- /* Add GP in group, for PMU ref count */
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
-
/* Create group for first (and mandatory) PP core */
MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- /* Find corresponding pp domain */
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
-
mali_inited_pp_cores_group_1++;
/* Create groups for rest of the cores in the first PP core group */
for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
-
mali_inited_pp_cores_group_1++;
}
}
if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
mali_inited_pp_cores_group_2++;
}
}
return _MALI_OSK_ERR_OK;
}
-static _mali_osk_errcode_t mali_create_pm_domains(void)
-{
- int i;
-
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- if (0x0 == mali_pmu_get_domain_mask(i)) continue;
-
- if (NULL == mali_pm_domain_create(mali_pmu_get_domain_mask(i))) {
- return _MALI_OSK_ERR_NOMEM;
- }
- }
-
- return _MALI_OSK_ERR_OK;
-}
-
-static void mali_use_default_pm_domain_config(void)
-{
- u32 pp_count_gr1 = 0;
- u32 pp_count_gr2 = 0;
- u32 l2_count = 0;
-
- MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
-
- /* GP core */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
- mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
- }
-
- /* PP0 - PP3 core */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 2);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 1);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 3);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 4);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 5);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- /* PP4 - PP7 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01 << 3);
- }
-
- /* L2gp/L2PP0/L2PP4 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
- ++l2_count;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 1);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 0);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
- ++l2_count;
-
- mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01 << 1);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
- ++l2_count;
-
- mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01 << 3);
- }
-
- MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
-}
-
-static void mali_set_pmu_global_domain_config(void)
-{
- _mali_osk_device_data data = { 0, };
- int i = 0;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
- /* Check whether has customized pmu domain configure */
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- if (0 != data.pmu_domain_config[i]) break;
- }
-
- if (MALI_MAX_NUMBER_OF_DOMAINS == i) {
- mali_use_default_pm_domain_config();
- } else {
- /* Copy the customer config to global config */
- mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
- }
- }
-}
-
static _mali_osk_errcode_t mali_parse_config_pmu(void)
{
_mali_osk_resource_t resource_pmu;
MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
struct mali_pmu_core *pmu;
- mali_set_pmu_global_domain_config();
-
pmu = mali_pmu_create(&resource_pmu);
if (NULL == pmu) {
MALI_PRINT_ERROR(("Failed to create PMU\n"));
return _MALI_OSK_ERR_OK;
}
-static _mali_osk_errcode_t mali_parse_config_dma(void)
-{
- _mali_osk_resource_t resource_dma;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
- if (NULL == mali_dma_create(&resource_dma)) {
- return _MALI_OSK_ERR_FAULT;
- }
- return _MALI_OSK_ERR_OK;
- } else {
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
- }
-}
-
static _mali_osk_errcode_t mali_parse_config_memory(void)
{
+ _mali_osk_device_data data = { 0, };
_mali_osk_errcode_t ret;
- if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+ /* mali_shared_mem_size, mali_dedicated_mem_start and
+ * mali_dedicated_mem_size are resolved with the following priority:
+ * 1. module parameter;
+ * 2. platform data;
+ * 3. default value.
+ */
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
/* Memory settings are not overridden by module parameters, so use device settings */
- _mali_osk_device_data data = { 0, };
-
- if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
/* Use device specific settings (if defined) */
mali_dedicated_mem_start = data.dedicated_mem_start;
mali_dedicated_mem_size = data.dedicated_mem_size;
- mali_shared_mem_size = data.shared_mem_size;
}
- if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
- /* No GPU memory specified */
- return _MALI_OSK_ERR_INVALID_ARGS;
+ if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+ 0 != data.shared_mem_size) {
+ mali_shared_mem_size = data.shared_mem_size;
}
-
- MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
- mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
- } else {
- MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
- mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
}
if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start));
+
/* Dedicated memory */
ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
if (_MALI_OSK_ERR_OK != ret) {
}
if (0 < mali_shared_mem_size) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
/* Shared OS memory */
ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
if (_MALI_OSK_ERR_OK != ret) {
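The priority chain above can be condensed into a standalone sketch (the helper name and parameters are illustrative, not driver code):

	/* Sketch: resolve the effective shared memory size. */
	static unsigned int resolve_shared_mem_size(unsigned int module_param,
						    unsigned int platform_size)
	{
		if (MALI_SHARED_MEMORY_DEFAULT_SIZE != module_param)
			return module_param;      /* 1. module parameter wins */
		if (0 != platform_size)
			return platform_size;     /* 2. then platform data */
		return MALI_SHARED_MEMORY_DEFAULT_SIZE; /* 3. default */
	}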
static void mali_detect_gpu_class(void)
{
- u32 number_of_pp_cores = 0;
- u32 number_of_l2_caches = 0;
-
- mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
- if (number_of_l2_caches > 1) {
+ if (_mali_osk_l2_resource_count() > 1) {
mali_gpu_class_is_mali450 = MALI_TRUE;
}
}
/* Ensure broadcast unit is in a good state before we start creating
* groups and cores.
*/
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
struct mali_bcast_unit *bcast_core;
bcast_core = mali_bcast_unit_create(&resource_bcast);
_mali_osk_errcode_t mali_initialize_subsystems(void)
{
_mali_osk_errcode_t err;
- struct mali_pmu_core *pmu;
+
+#ifdef CONFIG_MALI_DT
+ err = _mali_osk_resource_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
mali_pp_job_initialize();
+ mali_timeline_initialize();
+
err = mali_session_initialize();
- if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
#if defined(CONFIG_MALI400_PROFILING)
err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
#endif
err = mali_memory_initialize();
- if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_executor_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+ /* Configure memory early, needed by mali_mmu_initialize. */
err = mali_parse_config_memory();
- if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
err = mali_set_global_gpu_base_address();
- if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Detect gpu class according to l2 cache number */
+ /* Detect GPU class (uses L2 cache count) */
mali_detect_gpu_class();
err = mali_check_shared_interrupts();
- if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- err = mali_pp_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+ /* Initialize the MALI PMU (will not touch HW!) */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
/* Initialize the power management module */
err = mali_pm_initialize();
- if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
-
- /* Initialize the MALI PMU */
- err = mali_parse_config_pmu();
- if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
-
- /* Make sure the power stays on for the rest of this function */
- err = _mali_osk_pm_dev_ref_add();
- if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
-
- /*
- * If run-time PM is used, then the mali_pm module has now already been
- * notified that the power now is on (through the resume callback functions).
- * However, if run-time PM is not used, then there will probably not be any
- * calls to the resume callback functions, so we need to explicitly tell it
- * that the power is on.
- */
- mali_pm_set_power_is_on();
-
- /* Reset PMU HW and ensure all Mali power domains are on */
- pmu = mali_pmu_get_global_pmu_core();
- if (NULL != pmu) {
- err = mali_pmu_reset(pmu);
- if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
}
+ /* Make sure the entire GPU stays on for the rest of this function */
+ mali_pm_init_begin();
+
/* Ensure HW is in a good state before starting to access cores. */
err = mali_init_hw_reset();
- if (_MALI_OSK_ERR_OK != err) goto init_hw_reset_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
/* Detect which Mali GPU we are dealing with */
err = mali_parse_product_info();
- if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
/* The global_product_id is now populated with the correct Mali GPU */
- /* Create PM domains only if PMU exists */
- if (NULL != pmu) {
- err = mali_create_pm_domains();
- if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
- }
+ /* Start configuring the actual Mali hardware. */
- /* Initialize MMU module */
err = mali_mmu_initialize();
- if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
if (mali_is_mali450()) {
err = mali_dlbu_initialize();
- if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
-
- err = mali_parse_config_dma();
- if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
}
- /* Start configuring the actual Mali hardware. */
err = mali_parse_config_l2_cache();
- if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
err = mali_parse_config_groups();
- if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Initialize the schedulers */
- err = mali_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
- err = mali_gp_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+ /* Move groups into executor */
+ mali_executor_populate();
+
+ /* Must be called after every group has been assigned a domain */
+ mali_pm_power_cost_setup();
- /* PP scheduler population can't fail */
- mali_pp_scheduler_populate();
+ /* Initialize the GPU timer */
+ err = mali_control_timer_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
/* Initialize the GPU utilization tracking */
err = mali_utilization_init();
- if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
-
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
-
- MALI_SUCCESS; /* all ok */
-
- /* Error handling */
-
-utilization_init_failed:
- mali_pp_scheduler_depopulate();
- mali_gp_scheduler_terminate();
-gp_scheduler_init_failed:
- mali_scheduler_terminate();
-scheduler_init_failed:
-config_parsing_failed:
- mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
- mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
- {
- struct mali_dma_core *dma = mali_dma_get_global_dma_core();
- if (NULL != dma) mali_dma_delete(dma);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
}
-dma_parsing_failed:
- mali_dlbu_terminate();
-dlbu_init_failed:
- mali_mmu_terminate();
-mmu_init_failed:
- mali_pm_domain_terminate();
-pm_domain_failed:
- /* Nothing to roll back */
-product_info_parsing_failed:
- /* Nothing to roll back */
-init_hw_reset_failed:
- /* Nothing to roll back */
-pmu_reset_failed:
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
-pm_always_on_failed:
- pmu = mali_pmu_get_global_pmu_core();
- if (NULL != pmu) {
- mali_pmu_delete(pmu);
+
+#if defined(CONFIG_MALI_DVFS)
+ err = mali_dvfs_policy_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
}
-parse_pmu_config_failed:
- mali_pm_terminate();
-pm_init_failed:
- mali_pp_scheduler_terminate();
-pp_scheduler_init_failed:
-check_shared_interrupts_failed:
- global_gpu_base_address = 0;
-set_global_gpu_base_address_failed:
- /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
-parse_memory_config_failed:
- mali_memory_terminate();
-memory_init_failed:
-#if defined(CONFIG_MALI400_PROFILING)
- _mali_osk_profiling_term();
#endif
- mali_session_terminate();
-session_init_failed:
- mali_pp_job_terminate();
- return err;
+
+ /* Allowing the system to be turned off */
+ mali_pm_init_end();
+
+ return _MALI_OSK_ERR_OK; /* all ok */
}
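The rewrite drops the old goto-based unwinding in favor of a single pattern: every failing step calls the common teardown, which must therefore tolerate partially-initialized state. A sketch of the invariant (the step name is illustrative):

	/* Once mali_pm_init_begin() has run, every exit path must also call
	 * mali_pm_init_end() so the GPU is allowed to power down again. */
	err = some_init_step();
	if (_MALI_OSK_ERR_OK != err) {
		mali_pm_init_end();           /* balance mali_pm_init_begin() */
		mali_terminate_subsystems();  /* safe on partial init */
		return err;
	}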
void mali_terminate_subsystems(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- struct mali_dma_core *dma = mali_dma_get_global_dma_core();
MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
- /* shut down subsystems in reverse order from startup */
+ mali_utilization_term();
+ mali_control_timer_term();
- /* We need the GPU to be powered up for the terminate sequence */
- _mali_osk_pm_dev_ref_add();
+ mali_executor_depopulate();
+ mali_delete_groups(); /* Delete groups not added to executor */
+ mali_executor_terminate();
- mali_utilization_term();
- mali_pp_scheduler_depopulate();
- mali_gp_scheduler_terminate();
mali_scheduler_terminate();
+ mali_pp_job_terminate();
mali_delete_l2_cache_cores();
+ mali_mmu_terminate();
+
if (mali_is_mali450()) {
mali_dlbu_terminate();
}
- mali_mmu_terminate();
+
+ mali_pm_terminate();
+
if (NULL != pmu) {
mali_pmu_delete(pmu);
}
- if (NULL != dma) {
- mali_dma_delete(dma);
- }
- mali_pm_terminate();
- mali_memory_terminate();
+
#if defined(CONFIG_MALI400_PROFILING)
_mali_osk_profiling_term();
#endif
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
+ mali_memory_terminate();
- mali_pp_scheduler_terminate();
mali_session_terminate();
- mali_pp_job_terminate();
+ mali_timeline_terminate();
+
+ global_gpu_base_address = 0;
}
_mali_product_id_t mali_kernel_core_get_product_id(void)
args->version = _MALI_UK_API_VERSION; /* report our version */
/* success regardless of being compatible or not */
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
if (NULL == queue) {
MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;
}
/* receive a notification, might sleep */
/* finished with the notification */
_mali_osk_notification_delete(notification);
- MALI_SUCCESS; /* all ok */
+ return _MALI_OSK_ERR_OK; /* all ok */
}
_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
/* if the queue does not exist we're currently shutting down */
if (NULL == queue) {
MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;
}
notification = _mali_osk_notification_create(args->type, 0);
_mali_osk_notification_queue_send(queue, notification);
- MALI_SUCCESS; /* all ok */
+ return _MALI_OSK_ERR_OK; /* all ok */
}
_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
}
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_open(void **context)
MALI_ERROR(_MALI_OSK_ERR_NOMEM);
}
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
- MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
- mali_timeline_system_destroy(session->timeline_system);
- mali_soft_job_system_destroy(session->soft_job_system);
- mali_memory_session_end(session);
- mali_mmu_pagedir_free(session->page_directory);
- _mali_osk_notification_queue_term(session->ioctl_queue);
- _mali_osk_free(session);
- return _MALI_OSK_ERR_FAULT;
- }
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
#endif
session->use_high_priority_job_queue = MALI_FALSE;
_MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
}
+ session->pid = _mali_osk_get_pid();
+ session->comm = _mali_osk_get_comm();
+ session->max_mali_mem_allocated = 0;
+ _mali_osk_memset(session->mali_mem_array, 0, sizeof(size_t) * MALI_MEM_TYPE_MAX);
*context = (void *)session;
/* Add session to the list of all sessions. */
mali_session_add(session);
- MALI_DEBUG_PRINT(2, ("Session started\n"));
- MALI_SUCCESS;
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ return _MALI_OSK_ERR_OK;
}
+#if defined(DEBUG)
+/* parameters used for debugging */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
_mali_osk_errcode_t _mali_ukk_close(void **context)
{
struct mali_session_data *session;
/* Stop the soft job timer. */
mali_timeline_system_stop_timer(session->timeline_system);
- /* Abort queued and running GP and PP jobs. */
- mali_gp_scheduler_abort_session(session);
- mali_pp_scheduler_abort_session(session);
+ /* Abort queued jobs */
+ mali_scheduler_abort_session(session);
+
+ /* Abort executing jobs */
+ mali_executor_abort_session(session);
/* Abort the soft job system. */
mali_soft_job_system_abort(session->soft_job_system);
/* Free remaining memory allocated to this session */
mali_memory_session_end(session);
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
_mali_osk_atomic_term(&session->number_of_window_jobs);
#endif
*context = NULL;
- MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+ MALI_DEBUG_PRINT(3, ("Session has ended\n"));
- MALI_SUCCESS;
+#if defined(DEBUG)
+ MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+ MALI_DEBUG_PRINT(3, (" # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+ num_pm_runtime_resume = 0;
+ num_pm_updates = 0;
+ num_pm_updates_up = 0;
+ num_pm_updates_down = 0;
+#endif
+
+ return _MALI_OSK_ERR_OK;
}
#if MALI_STATE_TRACKING
{
int n = 0; /* Number of bytes written to buf */
- n += mali_gp_scheduler_dump_state(buf + n, size - n);
- n += mali_pp_scheduler_dump_state(buf + n, size - n);
+ n += mali_scheduler_dump_state(buf + n, size - n);
+ n += mali_executor_dump_state(buf + n, size - n);
return n;
}
#include "mali_kernel_descriptor_mapping.h"
#include "mali_osk.h"
#include "mali_osk_bitops.h"
+#include "mali_memory_types.h"
+#include "mali_session.h"
#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
{
_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
int new_descriptor;
+ mali_mem_allocation *descriptor;
+ struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(map);
MALI_DEBUG_ASSERT_POINTER(odescriptor);
+ MALI_DEBUG_ASSERT_POINTER(target);
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
map->table->mappings[new_descriptor] = target;
*odescriptor = new_descriptor;
+
+ /* Track the Mali memory usage for the session */
+ descriptor = (mali_mem_allocation *)target;
+ session = descriptor->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ session->mali_mem_array[descriptor->type] += descriptor->size;
+ if ((MALI_MEM_OS == descriptor->type || MALI_MEM_BLOCK == descriptor->type) &&
+ (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated)) {
+ session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
+ }
err = _MALI_OSK_ERR_OK;
unlock_and_exit:
void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor)
{
void *old_value = NULL;
+ mali_mem_allocation *tmp_descriptor;
+ struct mali_session_data *session;
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
map->table->mappings[descriptor] = NULL;
_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
}
+ if (NULL != old_value) {
+ tmp_descriptor = (mali_mem_allocation *)old_value;
+ session = tmp_descriptor->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ MALI_DEBUG_ASSERT(session->mali_mem_array[tmp_descriptor->type] >= tmp_descriptor->size);
+
+ session->mali_mem_array[tmp_descriptor->type] -= tmp_descriptor->size;
+ }
_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
return old_value;
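A worked example of the accounting above (sizes illustrative): the per-type counters move on both allocate and free, while max_mali_mem_allocated only ratchets upward on MALI_MEM_OS/MALI_MEM_BLOCK allocations.

	/* alloc OS 4MB:    mali_mem_array[MALI_MEM_OS]    = 4MB, peak = 4MB
	 * alloc BLOCK 2MB: mali_mem_array[MALI_MEM_BLOCK] = 2MB, peak = 6MB
	 * free OS 4MB:     mali_mem_array[MALI_MEM_OS]    = 0,   peak stays 6MB
	 */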
static void descriptor_table_free(mali_descriptor_table *table)
{
_mali_osk_free(table);
}
#include "mali_osk.h"
+struct mali_session_data;
+
/**
* The actual descriptor mapping table, never directly accessed by clients
*/
#include "mali_session.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
/* Thresholds for GP bound detection. */
#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
-/* Define how often to calculate and report GPU utilization, in milliseconds */
-static _mali_osk_spinlock_irq_t *time_data_lock;
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
-static u32 num_running_gp_cores;
-static u32 num_running_pp_cores;
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
static u64 work_start_time_gpu = 0;
static u64 work_start_time_gp = 0;
static u64 accumulated_work_time_gp = 0;
static u64 accumulated_work_time_pp = 0;
-static u64 period_start_time = 0;
-static _mali_osk_timer_t *utilization_timer = NULL;
-static mali_bool timer_running = MALI_FALSE;
-
static u32 last_utilization_gpu = 0 ;
static u32 last_utilization_gp = 0 ;
static u32 last_utilization_pp = 0 ;
-static u32 mali_utilization_timeout = 1000;
void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
-#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
-static u32 calculate_window_render_fps(u64 time_period)
-{
- u32 max_window_number;
- u64 tmp;
- u64 max = time_period;
- u32 leading_zeroes;
- u32 shift_val;
- u32 time_period_shift;
- u32 max_window_number_shift;
- u32 ret_val;
-
- max_window_number = mali_session_max_window_num();
- /* To avoid float division, extend the dividend to ns unit */
- tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
- if (tmp > time_period) {
- max = tmp;
- }
+/* First timeout of the control timer, in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
- /*
- * We may have 64-bit values, a dividend or a divisor or both
- * To avoid dependencies to a 64-bit divider, we shift down the two values
- * equally first.
- */
- leading_zeroes = _mali_osk_clz((u32)(max >> 32));
- shift_val = 32 - leading_zeroes;
-
- time_period_shift = (u32)(time_period >> shift_val);
- max_window_number_shift = (u32)(tmp >> shift_val);
-
- ret_val = max_window_number_shift / time_period_shift;
-
- return ret_val;
-}
-#endif /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
-
-static void calculate_gpu_utilization(void *arg)
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period)
{
u64 time_now;
- u64 time_period;
u32 leading_zeroes;
u32 shift_val;
u32 work_normalized_gpu;
u32 utilization_gpu;
u32 utilization_gp;
u32 utilization_pp;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- u32 window_render_fps;
-#endif
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
+
+ time_now = _mali_osk_time_get_ns();
+
+ *time_period = time_now - *start_time;
if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
/*
* - No need to reschedule timer
* - Report zero usage
*/
- timer_running = MALI_FALSE;
-
last_utilization_gpu = 0;
last_utilization_gp = 0;
last_utilization_pp = 0;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_pp = last_utilization_pp;
- if (NULL != mali_utilization_callback) {
- struct mali_gpu_utilization_data data = { 0, };
- mali_utilization_callback(&data);
- }
+ mali_utilization_data_unlock();
- mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ /* Stop (re-)adding the timer until the next job is submitted */
+ mali_control_timer_suspend(MALI_FALSE);
- return;
- }
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
- time_now = _mali_osk_time_get_ns();
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
- time_period = time_now - period_start_time;
+ return &mali_util_data;
+ }
/* If we are currently busy, update working period up to now */
if (work_start_time_gpu != 0) {
*/
/* Shift the 64-bit values down so they fit inside a 32-bit integer */
- leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
shift_val = 32 - leading_zeroes;
work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
- period_normalized = (u32)(time_period >> shift_val);
+ period_normalized = (u32)(*time_period >> shift_val);
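+ /*
+ * Illustrative example: with *time_period = 2^33 ns (~8.6 s),
+ * (*time_period >> 32) == 2 has 30 leading zeros, so shift_val
+ * becomes 2 and every value is scaled down by 4 before the
+ * 32-bit divisions below.
+ */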
/*
* Now, we should report the usage in parts of 256
utilization_gp = work_normalized_gp / period_normalized;
utilization_pp = work_normalized_pp / period_normalized;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- window_render_fps = calculate_window_render_fps(time_period);
-#endif
-
last_utilization_gpu = utilization_gpu;
last_utilization_gp = utilization_gp;
last_utilization_pp = utilization_pp;
if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
(MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
- mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+ mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
} else {
- mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
}
/* starting a new period */
accumulated_work_time_gpu = 0;
accumulated_work_time_gp = 0;
accumulated_work_time_pp = 0;
- period_start_time = time_now;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ *start_time = time_now;
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_pp = last_utilization_pp;
- if (NULL != mali_utilization_callback) {
- struct mali_gpu_utilization_data data = {
- utilization_gpu, utilization_gp, utilization_pp,
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- window_render_fps, window_render_fps
-#endif
- };
- mali_utilization_callback(&data);
- }
+ mali_utilization_data_unlock();
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
}
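+
+/*
+ * Sketch of the expected caller (mali_control_timer.c, outside this
+ * hunk): the timer callback owns the period start time and is assumed
+ * to do roughly
+ *
+ *	u64 period;
+ *	struct mali_gpu_utilization_data *util =
+ *		mali_utilization_calculate(&period_start_time, &period);
+ *	mali_utilization_platform_realize(util);
+ *
+ * (or hand util to the DVFS policy when CONFIG_MALI_DVFS is set)
+ * before re-arming itself with the normal interval.
+ */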
_mali_osk_errcode_t mali_utilization_init(void)
_mali_osk_device_data data;
if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
- /* Use device specific settings (if defined) */
- if (0 != data.utilization_interval) {
- mali_utilization_timeout = data.utilization_interval;
- }
if (NULL != data.utilization_callback) {
mali_utilization_callback = data.utilization_callback;
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
}
}
-#endif
-
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- if (mali_utilization_callback == NULL) {
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
- mali_utilization_callback = mali_power_performance_policy_callback;
- }
-#endif
+#endif /* defined(USING_GPU_UTILIZATION) */
if (NULL == mali_utilization_callback) {
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
}
- time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
-
- if (NULL == time_data_lock) {
+ utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+ if (NULL == utilization_data_lock) {
return _MALI_OSK_ERR_FAULT;
}
num_running_gp_cores = 0;
num_running_pp_cores = 0;
- utilization_timer = _mali_osk_timer_init();
- if (NULL == utilization_timer) {
- _mali_osk_spinlock_irq_term(time_data_lock);
- return _MALI_OSK_ERR_FAULT;
- }
- _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
-
return _MALI_OSK_ERR_OK;
}
-void mali_utilization_suspend(void)
-{
- _mali_osk_spinlock_irq_lock(time_data_lock);
-
- if (timer_running == MALI_TRUE) {
- timer_running = MALI_FALSE;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
- _mali_osk_timer_del(utilization_timer);
- return;
- }
-
- _mali_osk_spinlock_irq_unlock(time_data_lock);
-}
-
void mali_utilization_term(void)
{
- if (NULL != utilization_timer) {
- _mali_osk_timer_del(utilization_timer);
- timer_running = MALI_FALSE;
- _mali_osk_timer_term(utilization_timer);
- utilization_timer = NULL;
+ if (NULL != utilization_data_lock) {
+ _mali_osk_spinlock_irq_term(utilization_data_lock);
}
-
- _mali_osk_spinlock_irq_term(time_data_lock);
}
void mali_utilization_gp_start(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
++num_running_gp_cores;
if (1 == num_running_gp_cores) {
work_start_time_gp = time_now;
if (0 == num_running_pp_cores) {
+ mali_bool is_resume = MALI_FALSE;
/*
* There are no PP cores running, so this is also the point
* at which we consider the GPU to be busy as well.
*/
work_start_time_gpu = time_now;
- }
-
- /* Start a new period (and timer) if needed */
- if (timer_running != MALI_TRUE) {
- timer_running = MALI_TRUE;
- period_start_time = time_now;
- /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- mali_session_max_window_num();
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+ /* Run policy work at the start of the new period for performance reasons */
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs to prepare input for the DVFS policy */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause is called when the clock is set, so
+ * every clock change first jumps to the highest step,
+ * even when clocking down. That is unnecessary, so we
+ * only start a new DVFS period when the last utilization
+ * was 0, i.e. when the timer was stopped and the GPU is
+ * now being restarted.
+ */
+ mali_dvfs_policy_new_period();
+ }
#endif
- _mali_osk_spinlock_irq_unlock(time_data_lock);
-
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ /*
+ * Use a short interval for the first timeout to save
+ * power: the new period starts at full power, so if the
+ * job load is light (e.g. done within 10 ms) a long
+ * interval would keep the GPU at high frequency and
+ * waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
} else {
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
+
} else {
/* Nothing to do */
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
}
void mali_utilization_pp_start(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
++num_running_pp_cores;
if (1 == num_running_pp_cores) {
work_start_time_pp = time_now;
if (0 == num_running_gp_cores) {
+ mali_bool is_resume = MALI_FALSE;
/*
* There are no GP cores running, so this is also the point
* at which we consider the GPU to be busy as well.
*/
work_start_time_gpu = time_now;
- }
-
- /* Start a new period (and timer) if needed */
- if (timer_running != MALI_TRUE) {
- timer_running = MALI_TRUE;
- period_start_time = time_now;
- /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- mali_session_max_window_num();
+ /* Start a new period if stopped */
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs to prepare input for the DVFS policy */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause is called when the clock is set, so
+ * every clock change first jumps to the highest step,
+ * even when clocking down. That is unnecessary, so we
+ * only start a new DVFS period when the last utilization
+ * was 0, i.e. when the timer was stopped and the GPU is
+ * now being restarted.
+ */
+ mali_dvfs_policy_new_period();
+ }
#endif
- _mali_osk_spinlock_irq_unlock(time_data_lock);
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ /*
+ * Use a short interval for the first timeout to save
+ * power: the new period starts at full power, so if the
+ * job load is light (e.g. done within 10 ms) a long
+ * interval would keep the GPU at high frequency and
+ * waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
} else {
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
} else {
/* Nothing to do */
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
}
void mali_utilization_gp_end(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
--num_running_gp_cores;
if (0 == num_running_gp_cores) {
}
}
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
void mali_utilization_pp_end(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
--num_running_pp_cores;
if (0 == num_running_pp_cores) {
}
}
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
+}
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+ return mali_dvfs_policy_enabled();
+#else
+ return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
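+/*
+ * With CONFIG_MALI_DVFS the built-in policy decides whether utilization
+ * tracking is needed; without DVFS it is only needed when the platform
+ * has installed a callback.
+ */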
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+ MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+ mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(utilization_data_lock);
}
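+
+/*
+ * The lock/unlock helpers are exported so that callers outside this
+ * file (e.g. the control timer) can sample or reset the utilization
+ * state atomically with respect to the GP/PP accounting above.
+ */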
u32 _mali_ukk_utilization_gp_pp(void)
#include <linux/mali/mali_utgard.h>
#include "mali_osk.h"
-extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
-
/**
* Initialize/start the Mali GPU utilization metrics reporting.
*
/**
* Check if Mali utilization is enabled
*/
-MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
-{
- return (NULL != mali_utilization_callback);
-}
+mali_bool mali_utilization_enabled(void);
/**
* Should be called when a job is about to execute a GP job
void mali_utilization_pp_end(void);
/**
- * Should be called to stop the utilization timer during system suspend
+ * Should be called to calculate the GPU utilization
*/
-void mali_utilization_suspend(void);
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_reset(void);
#endif /* __MALI_KERNEL_UTILIZATION_H__ */
#include "mali_osk.h"
#include "mali_ukk.h"
-#if defined(CONFIG_MALI400_PROFILING)
#include "mali_osk_profiling.h"
-#endif
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
MALI_IGNORE(event); /* event is not used for release code, and that is OK */
-#if defined(CONFIG_MALI400_PROFILING)
/*
* Manually generate user space events in kernel space.
* This saves user space from calling kernel space twice in this case.
}
if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
-
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
_mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
}
-#endif
+
MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
MALI_SUCCESS;
#include "mali_l2_cache.h"
#include "mali_hw_core.h"
#include "mali_scheduler.h"
+#include "mali_pm.h"
#include "mali_pm_domain.h"
/**
MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
/*unused = 0x000C */
- MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010,
MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
- MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
- MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018,
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C,
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
* These are the commands that can be sent to the Mali L2 cache unit
*/
typedef enum mali_l2_cache_command {
- MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
- /* Read HW TRM carefully before adding/using other commands than the clear above */
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
} mali_l2_cache_command;
/**
- * These are the commands that can be sent to the Mali L2 cache unit
+ * These are the enable bits that can be set for the Mali L2 cache unit
*/
typedef enum mali_l2_cache_enable {
- MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
- MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
- MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
} mali_l2_cache_enable;
/**
* Mali L2 cache status bits
*/
typedef enum mali_l2_cache_status {
- MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
- MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02,
} mali_l2_cache_status;
-#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+#define MALI400_L2_MAX_READS_NOT_SET -1
-static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
-static u32 mali_global_num_l2_cache_cores = 0;
+static struct mali_l2_cache_core *
+ mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
-int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
/* Local helper functions */
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
-static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(cache->counter_lock);
-#else
- _mali_osk_spinlock_lock(cache->counter_lock);
-#endif
-}
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val);
-static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(cache->counter_lock);
-#else
- _mali_osk_spinlock_unlock(cache->counter_lock);
-#endif
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_lock(cache->lock);
}
-static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(cache->command_lock);
-#else
- _mali_osk_spinlock_lock(cache->command_lock);
-#endif
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_unlock(cache->lock);
}
-static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(cache->command_lock);
-#else
- _mali_osk_spinlock_unlock(cache->command_lock);
-#endif
-}
+/* Implementation of the L2 cache interface */
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index)
{
struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+ u32 cache_size;
+#endif
- MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+ resource->description));
- if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
- MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
return NULL;
}
cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
- if (NULL != cache) {
- cache->core_id = mali_global_num_l2_cache_cores;
- cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
- cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
- cache->pm_domain = NULL;
- cache->mali_l2_status = MALI_L2_NORMAL;
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
- MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
- MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
- resource->description,
- 1 << (((cache_size >> 16) & 0xff) - 10),
- 1 << ((cache_size >> 8) & 0xff),
- 1 << (cache_size & 0xff),
- 1 << ((cache_size >> 24) & 0xff)));
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
- cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
- cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
- if (NULL != cache->command_lock) {
-#ifdef MALI_UPPER_HALF_SCHEDULING
- cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
- cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
- if (NULL != cache->counter_lock) {
- mali_l2_cache_reset(cache);
-
- cache->last_invalidated_id = 0;
-
- mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
- mali_global_num_l2_cache_cores++;
-
- return cache;
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
- }
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
- _mali_osk_spinlock_term(cache->command_lock);
-#endif
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
- }
+ if (NULL == cache) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
+ }
- mali_hw_core_delete(&cache->hw_core);
- }
+ cache->core_id = mali_global_num_l2s;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_value0_base = 0;
+ cache->counter_value1_base = 0;
+ cache->pm_domain = NULL;
+ cache->power_is_on = MALI_FALSE;
+ cache->last_invalidated_id = 0;
+
+ if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+ resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ _mali_osk_free(cache);
+ return NULL;
+ }
+#if defined(DEBUG)
+ cache_size = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_SIZE);
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+#endif
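+ /*
+ * The SIZE register packs log2-encoded fields, decoded by the
+ * print above: bits [7:0] line size, [15:8] associativity,
+ * [23:16] total size (hence the -10 to get KB), [31:24]
+ * external bus width.
+ */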
+
+ cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_L2);
+ if (NULL == cache->lock) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+ cache->hw_core.description));
+ mali_hw_core_delete(&cache->hw_core);
_mali_osk_free(cache);
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
}
- return NULL;
+ /* register with correct power domain */
+ cache->pm_domain = mali_pm_register_l2_cache(
+ domain_index, cache);
+
+ mali_global_l2s[mali_global_num_l2s] = cache;
+ mali_global_num_l2s++;
+
+ return cache;
}
void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
{
u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ if (mali_global_l2s[i] != cache) {
+ continue;
+ }
- /* reset to defaults */
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(cache->counter_lock);
- _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
- _mali_osk_spinlock_term(cache->command_lock);
- _mali_osk_spinlock_term(cache->counter_lock);
-#endif
-
- mali_hw_core_delete(&cache->hw_core);
-
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- if (mali_global_l2_cache_cores[i] == cache) {
- mali_global_l2_cache_cores[i] = NULL;
- mali_global_num_l2_cache_cores--;
-
- if (i != mali_global_num_l2_cache_cores) {
- /* We removed a l2 cache from the middle of the array -- move the last
- * l2 cache to the current position to close the gap */
- mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
- mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
- }
+ mali_global_l2s[i] = NULL;
+ mali_global_num_l2s--;
+ if (i == mali_global_num_l2s) {
+ /* Removed last element, nothing more to do */
break;
}
+
+ /*
+ * We removed an L2 cache from the middle of the array,
+ * so move the last L2 cache to the current position
+ */
+ mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+ mali_global_l2s[mali_global_num_l2s] = NULL;
+
+ /* All good */
+ break;
}
+ _mali_osk_spinlock_irq_term(cache->lock);
+ mali_hw_core_delete(&cache->hw_core);
_mali_osk_free(cache);
}
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
{
- return cache->core_id;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ mali_l2_cache_reset(cache);
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+ cache->power_is_on = MALI_TRUE;
+
+ mali_l2_cache_unlock(cache);
}
-static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
{
- u32 value = 0; /* disabled src */
- u32 reg_offset = 0;
- mali_bool core_is_on;
-
MALI_DEBUG_ASSERT_POINTER(cache);
- core_is_on = mali_l2_cache_lock_power_state(cache);
+ mali_l2_cache_lock(cache);
- mali_l2_cache_counter_lock(cache);
+ MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
- switch (source_id) {
- case 0:
- cache->counter_src0 = counter;
- reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
- break;
-
- case 1:
- cache->counter_src1 = counter;
- reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
- break;
+ /*
+ * The HW counters will start from zero again when we resume,
+ * but we should report counters as always increasing.
+ * Take a copy of the HW values now in order to add this to
+ * the values we report after being powered up.
+ *
+ * The physical power off of the L2 cache might be outside our
+ * own control (e.g. runtime PM). That is why we must manually
+ * set the counter value to zero as well.
+ */
- default:
- MALI_DEBUG_ASSERT(0);
- break;
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value0_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
}
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
- return;
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value1_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
}
- if (MALI_HW_CORE_NO_COUNTER != counter) {
- value = counter;
- }
- if (MALI_TRUE == core_is_on) {
- mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
- }
+ cache->power_is_on = MALI_FALSE;
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
+ mali_l2_cache_unlock(cache);
}
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
{
- mali_l2_cache_core_set_counter_internal(cache, 0, counter);
-}
+ u32 reg_offset_src;
+ u32 reg_offset_val;
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
-{
- mali_l2_cache_core_set_counter_internal(cache, 1, counter);
-}
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
-{
- return cache->counter_src0;
-}
+ mali_l2_cache_lock(cache);
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
-{
- return cache->counter_src1;
+ if (0 == source_id) {
+ /* start counting from 0 */
+ cache->counter_value0_base = 0;
+ cache->counter_src0 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+ } else {
+ /* start counting from 0 */
+ cache->counter_value1_base = 0;
+ cache->counter_src1 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+ }
+
+ if (cache->power_is_on) {
+ u32 hw_src;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ hw_src = counter;
+ } else {
+ hw_src = 0; /* disable value for HW */
+ }
+
+ /* Set counter src */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_src, hw_src);
+
+ /* Make sure the HW starts counting from 0 again */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_val, 0);
+ }
+
+ mali_l2_cache_unlock(cache);
}
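+
+/*
+ * Illustrative usage (callers now pass the source as an argument
+ * instead of using the removed src0/src1 setters):
+ *
+ *	mali_l2_cache_core_set_counter_src(cache, 0, counter);
+ */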
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
+ MALI_DEBUG_ASSERT_POINTER(cache);
MALI_DEBUG_ASSERT(NULL != src0);
MALI_DEBUG_ASSERT(NULL != value0);
MALI_DEBUG_ASSERT(NULL != src1);
MALI_DEBUG_ASSERT(NULL != value1);
- /* Caller must hold the PM lock and know that we are powered on */
-
- mali_l2_cache_counter_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
-
- return;
- }
+ mali_l2_cache_lock(cache);
*src0 = cache->counter_src0;
*src1 = cache->counter_src1;
if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
- *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
- }
+ if (MALI_TRUE == cache->power_is_on) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ } else {
+ *value0 = 0;
+ }
- if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
- *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ /* Add base offset value (in case we have been powered off) */
+ *value0 += cache->counter_value0_base;
}
- mali_l2_cache_counter_unlock(cache);
-}
-
-static void mali_l2_cache_reset_counters_all(void)
-{
- int i;
- u32 value;
- struct mali_l2_cache_core *cache;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-
- for (i = 0; i < num_cores; i++) {
- cache = mali_l2_cache_core_get_glob_l2_core(i);
- if (!cache)
- continue;
-
- if (mali_l2_cache_lock_power_state(cache)) {
- mali_l2_cache_counter_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
- return;
- }
-
- /* Reset performance counters */
- if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
- value = 0;
- } else {
- value = cache->counter_src0;
- }
- mali_hw_core_register_write(&cache->hw_core,
- MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
-
- if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
- value = 0;
- } else {
- value = cache->counter_src1;
- }
- mali_hw_core_register_write(&cache->hw_core,
- MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
-
- mali_l2_cache_counter_unlock(cache);
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ } else {
+ *value1 = 0;
}
- mali_l2_cache_unlock_power_state(cache);
+ /* Add base offset value (in case we have been powered off) */
+ *value1 += cache->counter_value1_base;
}
-}
+ mali_l2_cache_unlock(cache);
+}
struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
{
- if (mali_global_num_l2_cache_cores > index) {
- return mali_global_l2_cache_cores[index];
+ if (mali_global_num_l2s > index) {
+ return mali_global_l2s[index];
}
return NULL;
u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
- return mali_global_num_l2_cache_cores;
+ return mali_global_num_l2s;
}
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
- /* Kasin Added, skip off power domain. */
- if (cache && cache->pm_domain && cache->pm_domain->state == MALI_PM_DOMAIN_OFF) {
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL == cache) {
return;
}
-
- /* Invalidate cache (just to keep it in a known state at startup) */
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
- mali_l2_cache_counter_lock(cache);
+ mali_l2_cache_lock(cache);
+
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ if (NULL == cache) {
return;
}
- /* Enable cache */
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+ /*
+ * If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started.
+ */
- /* Restart any performance counters (if enabled) */
- if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
- }
+ mali_l2_cache_lock(cache);
- if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+ if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+ /* Set latest invalidated id to current "point in time" */
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}
- mali_l2_cache_counter_unlock(cache);
+ mali_l2_cache_unlock(cache);
}
-void mali_l2_cache_reset_all(void)
+void mali_l2_cache_invalidate_all(void)
{
- int i;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ _mali_osk_errcode_t ret;
- for (i = 0; i < num_cores; i++) {
- mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
- }
-}
+ MALI_DEBUG_ASSERT_POINTER(cache);
-void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
-{
- MALI_DEBUG_ASSERT_POINTER(cache);
+ mali_l2_cache_lock(cache);
- if (NULL != cache) {
- cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
- }
-}
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
-{
- MALI_DEBUG_ASSERT_POINTER(cache);
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
- if (NULL != cache) {
- /* If the last cache invalidation was done by a job with a higher id we
- * don't have to flush. Since user space will store jobs w/ their
- * corresponding memory in sequence (first job #0, then job #1, ...),
- * we don't have to flush for job n-1 if job n has already invalidated
- * the cache since we know for sure that job n-1's memory was already
- * written when job n was started. */
- if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
- return MALI_FALSE;
- } else {
- cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
}
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ mali_l2_cache_unlock(cache);
}
- return MALI_TRUE;
}
-void mali_l2_cache_invalidate_all(void)
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
u32 i;
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- /*additional check*/
- if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ u32 j;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ for (j = 0; j < num_pages; j++) {
_mali_osk_errcode_t ret;
- mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
- ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+ pages[j]);
if (_MALI_OSK_ERR_OK != ret) {
- MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+ MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
}
}
- mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+
+ mali_l2_cache_unlock(cache);
}
}
-void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+/* -------- local helper functions below -------- */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
- u32 i;
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- /*additional check*/
- if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
- u32 j;
- for (j = 0; j < num_pages; j++) {
- _mali_osk_errcode_t ret;
- ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
- if (_MALI_OSK_ERR_OK != ret) {
- MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
- }
- }
- }
- mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+ /* Kasin Added: skip the reset while the power domain is off. */
+ if (cache && cache->pm_domain && MALI_TRUE != cache->pm_domain->power_is_on) {
+ return;
}
-}
+
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
-{
- return mali_pm_domain_lock_state(cache->pm_domain);
-}
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
-{
- return mali_pm_domain_unlock_state(cache->pm_domain);
-}
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_ENABLE,
+ (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+ (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+ if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_MAX_READS,
+ (u32)mali_l2_max_reads);
+ }
-/* -------- local helper functions below -------- */
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+ cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+ cache->counter_src1);
+ }
+}
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
int i = 0;
const int loop_count = 100000;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
/*
- * Grab lock in order to send commands to the L2 cache in a serialized fashion.
- * The L2 cache will ignore commands if it is busy.
+ * First, wait for L2 cache command handler to go idle.
+ * (Commands received while processing another command will be ignored)
*/
- mali_l2_cache_command_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_command_unlock(cache);
- MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for L2 come back\n"));
-
- MALI_ERROR(_MALI_OSK_ERR_BUSY);
- }
-
- /* First, wait for L2 cache command handler to go idle */
-
for (i = 0; i < loop_count; i++) {
- if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+ if (!(mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_STATUS) &
+ (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
break;
}
}
if (i == loop_count) {
- mali_l2_cache_command_unlock(cache);
MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
- MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ return _MALI_OSK_ERR_FAULT;
}
/* then issue the command */
mali_hw_core_register_write(&cache->hw_core, reg, val);
- mali_l2_cache_command_unlock(cache);
-
- MALI_SUCCESS;
-}
-
-void mali_l2_cache_pause_all(mali_bool pause)
-{
- int i;
- struct mali_l2_cache_core *cache;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
- mali_l2_power_status status = MALI_L2_NORMAL;
-
- if (pause) {
- status = MALI_L2_PAUSE;
- }
-
- for (i = 0; i < num_cores; i++) {
- cache = mali_l2_cache_core_get_glob_l2_core(i);
- if (NULL != cache) {
- cache->mali_l2_status = status;
-
- /* Take and release the counter and command locks to
- * ensure there are no active threads that didn't get
- * the status flag update.
- *
- * The locks will also ensure the necessary memory
- * barriers are done on SMP systems.
- */
- mali_l2_cache_counter_lock(cache);
- mali_l2_cache_counter_unlock(cache);
-
- mali_l2_cache_command_lock(cache);
- mali_l2_cache_command_unlock(cache);
- }
- }
-
- /* Resume from pause: do the cache invalidation here to prevent any
- * loss of cache operation during the pause period to make sure the SW
- * status is consistent with L2 cache status.
- */
- if (!pause) {
- mali_l2_cache_invalidate_all();
- mali_l2_cache_reset_counters_all();
- }
+ return _MALI_OSK_ERR_OK;
}
#include "mali_hw_core.h"
#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
-/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
-struct mali_group;
-struct mali_pm_domain;
-
-/* Flags describing state of the L2 */
-typedef enum mali_l2_power_status {
- MALI_L2_NORMAL, /**< L2 is in normal state and operational */
- MALI_L2_PAUSE, /**< L2 may not be accessed and may be powered off */
-} mali_l2_power_status;
-
/**
* Definition of the L2 cache core struct
* Used to track a L2 cache unit in the system.
* Contains information about the mapping of the registers
*/
struct mali_l2_cache_core {
- struct mali_hw_core hw_core; /**< Common for all HW cores */
- u32 core_id; /**< Unique core ID */
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_t *command_lock; /**< Serialize all L2 cache commands */
- _mali_osk_spinlock_irq_t *counter_lock; /**< Synchronize L2 cache counter access */
-#else
- _mali_osk_spinlock_t *command_lock;
- _mali_osk_spinlock_t *counter_lock;
-#endif
- u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
- u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
- u32 last_invalidated_id;
+ /* Common HW core functionality */
+ struct mali_hw_core hw_core;
+
+ /* Synchronize L2 cache access */
+ _mali_osk_spinlock_irq_t *lock;
+
+ /* Unique core ID */
+ u32 core_id;
+
+ /* The power domain this L2 cache belongs to */
struct mali_pm_domain *pm_domain;
- mali_l2_power_status mali_l2_status; /**< Indicate whether the L2 is paused or not */
+
+ /* MALI_TRUE if power is on for this L2 cache */
+ mali_bool power_is_on;
+
+ /* A "timestamp" to avoid unnecessary flushes */
+ u32 last_invalidated_id;
+
+ /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0;
+
+ /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1;
+
+ /*
+ * Performance counter 0 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value0_base;
+
+ /*
+ * Performance counter 1 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value1_base;
+
+ /* Used by PM domains to link L2 caches of same domain */
+ _mali_osk_list_t pm_domain_list;
};
_mali_osk_errcode_t mali_l2_cache_initialize(void);
void mali_l2_cache_terminate(void);
-/**
- * L2 pause is just a status that the L2 can't be accessed temporarily.
-*/
-void mali_l2_cache_pause_all(mali_bool pause);
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index);
void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
-MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
{
- cache->pm_domain = domain;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->core_id;
}
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1);
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
-void mali_l2_cache_reset_all(void);
-
-struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+struct mali_group *mali_l2_cache_get_group(
+ struct mali_l2_cache_core *cache, u32 index);
void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id);
+
void mali_l2_cache_invalidate_all(void);
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
-
#endif /* __MALI_KERNEL_L2_CACHE_H__ */
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
-/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
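+/* A non-zero RAWSTAT is treated as an error: the Mali MMU only raises
+ * interrupts for page faults and read bus errors. */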
+
+
MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
{
return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
*
* @param atom pointer to an atomic counter
* @param val the value to initialize the atomic counter.
- * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
- * _mali_osk_errcode_t on failure.
*/
-_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
/** @brief Read a value from an atomic counter
*
* @return On success, a Mali IO address through which the mapped-in
* memory/registers can be accessed. NULL on failure.
*/
-mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description);
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
/** @brief Unmap a physically contiguous address range from kernel space.
*
* @param mapping The Mali IO address through which the mapping is
* accessed.
*/
-void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address mapping);
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
/** @brief Allocate and Map a physically contiguous region into kernel space
*
* @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
* _mali_osk_errcode_t on failure.
*/
-_mali_osk_errcode_t _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description);
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
/** @brief Un-request a region of physically contiguous memory
*
* @param size the number of bytes of physically contiguous address space to
* un-request.
*/
-void _mali_osk_mem_unreqregion(u32 phys, u32 size);
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
/** @brief Read from a location currently mapped in through
* _mali_osk_mem_mapioregion
* @param ticks_to_expire the amount of time in ticks for the timer to run
* before triggering.
*/
-void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
/** @brief Modify a timer
*
* should trigger.
*
*/
-void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
/** @brief Stop a timer, and block on its completion.
*
*
* @{ */
-/** @brief Return whether ticka occurs after tickb
+/** @brief Return whether ticka occurs after or at the same time as tickb
*
- * Some OSs handle tick 'rollover' specially, and so can be more robust against
- * tick counters rolling-over. This function must therefore be called to
- * determine if a time (in ticks) really occurs after another time (in ticks).
+ * The comparison must be robust against tick counter wrap-around.
*
* @param ticka ticka
* @param tickb tickb
- * @return non-zero if ticka represents a time that occurs after tickb.
- * Zero otherwise.
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
*/
-int _mali_osk_time_after(u32 ticka, u32 tickb);
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
/** @brief Convert milliseconds to OS 'ticks'
*
* @param ms time interval in milliseconds
* @return the corresponding time interval in OS ticks.
*/
-u32 _mali_osk_time_mstoticks(u32 ms);
+unsigned long _mali_osk_time_mstoticks(u32 ms);
/** @brief Convert OS 'ticks' to milliseconds
*
* @param ticks time interval in OS ticks.
* @return the corresponding time interval in milliseconds
*/
-u32 _mali_osk_time_tickstoms(u32 ticks);
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
/** @brief Get the current time in OS 'ticks'.
* @return the current time in OS 'ticks'.
*/
-u32 _mali_osk_time_tickcount(void);
+unsigned long _mali_osk_time_tickcount(void);
/** @brief Cause a microsecond delay
*
*/
u64 _mali_osk_time_get_ns(void);
+/** @brief Return the time in nanoseconds since boot.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
/** @} */ /* end group _mali_osk_time */
*/
u32 _mali_osk_get_pid(void);
+/** @brief Return the name of the calling process.
+ *
+ * @return Name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
/** @brief Return an identificator for calling thread.
*
* @return Identificator for calling thread.
*/
u32 _mali_osk_get_tid(void);
-/** @brief Enable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_enable(void);
-
-/** @brief Disable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_disable(void);
-
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
*
* When function returns successfully, Mali is ON.
*
- * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
- */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
-
-
-/** @brief Release the reference to the power manger system for the Mali device.
- *
- * When reference count reach zero, the cores can be off.
- *
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
*/
-void _mali_osk_pm_dev_ref_dec(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
*
- * Will leave the cores powered off if they are already powered off.
+ * Mali might not yet be powered on after this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
*
- * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
- *
- * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
*/
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
-/** @brief Releasing the reference to the power manger system for the Mali device.
+/** @brief Release the reference to the external power manger system for the Mali device.
*
* When reference count reach zero, the cores can be off.
*
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
*/
-void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+void _mali_osk_pm_dev_ref_put(void);
-/** @brief Block untill pending PM operations are done
+/** @brief Block until pending PM operations are done
*/
void _mali_osk_pm_dev_barrier(void);
*/
typedef struct mali_gpu_device_data _mali_osk_device_data;
+#ifdef CONFIG_MALI_DT
+/** @brief Initialize device resources when the device tree is used
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
/** @brief Find Mali GPU HW resource
*
* @param addr Address of Mali GPU resource to find
*
 * @return the lowest base address among the Mali GPU components, or 0 if no resources are found.
*/
-u32 _mali_osk_resource_base_address(void);
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the number of L2 cache cores.
+ *
+ * @return the number of L2 cache cores found in the device resources.
+ */
+u32 _mali_osk_l2_resource_count(void);
/** @brief Retrieve the Mali GPU specific data
*
*/
_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+/** @brief Find the PMU domain configuration in the device data.
+ *
+ * @param domain_config_array array used to store the PMU domain configuration found in the device data.
+ * @param array_size the size of the domain_config_array array.
+ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
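+/*
+ * Illustrative call (a sketch mirroring the caller in mali_pm.c,
+ * mali_pm_set_pmu_domain_config(); the array name is an example):
+ *
+ *   u16 cfg[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ *   _mali_osk_device_data_pmu_config_get(cfg, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+ *
+ * If every entry is still zero afterwards, no custom PMU configuration
+ * was provided in the device data.
+ */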
+
+/** @brief Get the Mali PMU switch delay
+ *
+ * @return the PMU switch delay, if it is configured
+ */
+u32 _mali_osk_get_pmu_switch_delay(void);
+
/** @brief Determines if Mali GPU has been configured with shared interrupts.
*
* @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
_MALI_OSK_LOCK_ORDER_MEM_INFO,
_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
- _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
- _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+ _MALI_OSK_LOCK_ORDER_EXECUTOR,
_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
_MALI_OSK_LOCK_ORDER_SCHEDULER,
_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
- _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
- _MALI_OSK_LOCK_ORDER_L2_COMMAND,
- _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
_MALI_OSK_LOCK_ORDER_PROFILING,
- _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_L2,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
_MALI_OSK_LOCK_ORDER_UTILIZATION,
- _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
- _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
- _MALI_OSK_LOCK_ORDER_PMU,
+ _MALI_OSK_LOCK_ORDER_PM_STATE,
_MALI_OSK_LOCK_ORDER_LAST,
} _mali_osk_lock_order_t;
*/
typedef struct _mali_osk_resource {
const char *description; /**< short description of the resource */
- u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ uintptr_t base; /**< Physical base address of the resource, as seen by Mali resources. */
+	const char *irq_name; /**< Name of the IRQ belonging to this resource */
u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
} _mali_osk_resource_t;
/** @} */ /* end group _mali_osk_miscellaneous */
#include "mali_pm.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_osk_mali.h"
#include "mali_scheduler.h"
-#include "mali_kernel_utilization.h"
#include "mali_group.h"
#include "mali_pm_domain.h"
#include "mali_pmu.h"
-static mali_bool mali_power_on = MALI_FALSE;
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* work item used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * Domains which are marked as powered on (protected by pm_lock_exec).
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1 << MALI_DOMAIN_INDEX_DUMMY
+};
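+/*
+ * Example: with the Mali-400 defaults set up by
+ * mali_pm_set_default_pm_domain_config() below,
+ * domain_config[MALI_DOMAIN_INDEX_PP0] == (0x01 << 2), i.e. PP0 lives in
+ * the PMU domain controlled by bit 2 of the PMU mask. An entry equal to
+ * MALI_PM_DOMAIN_DUMMY_MASK means the core has no physical PMU domain
+ * and is treated as always on.
+ */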
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store a row for each possible number of PP cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
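+/*
+ * Illustration (a hypothetical Mali-450 MP2 split across the PMU domains
+ * at bits 1 and 2): row [2] of this matrix could read {0, 1, 1, 0, ...},
+ * i.e. "to power on 2 PP cores at the lowest cost, take one core from
+ * the domain at bit 1 and one from the domain at bit 2". The last column
+ * counts always-on cores; see mali_pm_power_cost_setup().
+ */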
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel does not enable PM_RUNTIME, keep this flag always true,
+ * since the GPU will then never be powered off by runtime PM */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
_mali_osk_errcode_t mali_pm_initialize(void)
{
- _mali_osk_pm_dev_enable();
+ _mali_osk_errcode_t err;
+ struct mali_pmu_core *pmu;
+
+ pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_state) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+	pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+					    _MALI_OSK_LOCK_ORDER_PM_EXECUTION);
+ if (NULL == pm_lock_exec) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+ if (NULL == pm_work) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ /*
+ * We have a Mali PMU, set the correct domain
+ * configuration (default or custom)
+ */
+
+ u32 registered_cores_mask;
+
+ mali_pm_set_pmu_domain_config();
+
+ registered_cores_mask = mali_pm_get_registered_cores_mask();
+ mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ }
+
+ /* Create all power domains needed (at least one dummy domain) */
+ err = mali_pm_create_pm_domains();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_terminate();
+ return err;
+ }
+
return _MALI_OSK_ERR_OK;
}
void mali_pm_terminate(void)
{
+ if (NULL != pm_work) {
+ _mali_osk_wq_delete_work(pm_work);
+ pm_work = NULL;
+ }
+
mali_pm_domain_terminate();
- _mali_osk_pm_dev_disable();
+
+ if (NULL != pm_lock_exec) {
+ _mali_osk_mutex_term(pm_lock_exec);
+ pm_lock_exec = NULL;
+ }
+
+ if (NULL != pm_lock_state) {
+ _mali_osk_spinlock_irq_term(pm_lock_state);
+ pm_lock_state = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+ return domain; /* return the actual domain this was registered in */
}
-/* Reset GPU after power up */
-static void mali_pm_reset_gpu(void)
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group)
{
- /* Reset all L2 caches */
- mali_l2_cache_reset_all();
+ struct mali_pm_domain *domain;
- /* Reset all groups */
- mali_scheduler_reset_all_groups();
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_group(domain, group);
+
+ return domain; /* return the actual domain this was registered in */
}
-void mali_pm_os_suspend(void)
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains)
{
- MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
- mali_gp_scheduler_suspend();
- mali_pp_scheduler_suspend();
- mali_utilization_suspend();
- mali_group_power_off(MALI_TRUE);
- mali_power_on = MALI_FALSE;
+ mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+ if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+ /*
+ * Tell caller that the corresponding group
+ * was not already powered on.
+ */
+ ret = MALI_FALSE;
+ } else {
+ /*
+			 * There is a time gap between powering on the domain
+			 * and marking the corresponding groups as powered on.
+ */
+ if (NULL != groups[i] &&
+ MALI_FALSE == mali_group_power_is_on(groups[i])) {
+ ret = MALI_FALSE;
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
}
-void mali_pm_os_resume(void)
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains)
+{
+ u32 mask = 0;
+ mali_bool ret;
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ mask |= mali_pm_domain_ref_put(domains[i]);
+ }
+
+ if (0 == mask) {
+ /* return false, all domains should still stay on */
+ ret = MALI_FALSE;
+ } else {
+ /* Assert that we are dealing with a change */
+ MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+ /* Update our desired domain mask */
+ pd_mask_wanted &= ~mask;
+
+ /* return true; one or more domains can now be powered down */
+ ret = MALI_TRUE;
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
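+
+/*
+ * Usage sketch (illustrative only; the executor is the assumed caller):
+ *
+ *   if (MALI_FALSE == mali_pm_get_domain_refs(domains, groups, n))
+ *           ... wait until the groups report power on before use ...
+ *   ... execute jobs ...
+ *   if (MALI_TRUE == mali_pm_put_domain_refs(domains, n))
+ *           mali_pm_update_async(); ... unused domains may power down ...
+ */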
+
+void mali_pm_init_begin(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- mali_bool do_reset = MALI_FALSE;
- MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+ _mali_osk_pm_dev_ref_get_sync();
- if (MALI_TRUE != mali_power_on) {
- do_reset = MALI_TRUE;
+ /* Ensure all PMU domains are on */
+ if (NULL != pmu) {
+ mali_pmu_power_up_all(pmu);
}
+}
+void mali_pm_init_end(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ /* Ensure all PMU domains are off */
if (NULL != pmu) {
- mali_pmu_reset(pmu);
+ mali_pmu_power_down_all(pmu);
}
- mali_power_on = MALI_TRUE;
- _mali_osk_write_mem_barrier();
+ _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+ mali_pm_exec_lock();
- if (do_reset) {
- mali_pm_reset_gpu();
- mali_group_power_on();
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /*
+ * Only update if GPU is powered on.
+ * Deactivation of the last group will result in both a
+ * deferred runtime PM suspend operation and
+ * deferred execution of this function.
+ * mali_pm_runtime_active will be false if runtime PM
+ * executed first and thus the GPU is now fully powered off.
+ */
+ mali_pm_update_sync_internal();
}
- mali_gp_scheduler_resume();
- mali_pp_scheduler_resume();
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+ _mali_osk_wq_schedule_work(pm_work);
}
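+
+/*
+ * Note: mali_pm_update_async() merely schedules pm_work, whose handler
+ * (mali_pm_update_work() below) calls mali_pm_update_sync(). This makes
+ * the async variant usable where sleeping is not allowed, while the sync
+ * variant must only be used in non-atomic context, since it takes the
+ * pm_lock_exec mutex.
+ */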
-void mali_pm_runtime_suspend(void)
+void mali_pm_os_suspend(mali_bool os_suspend)
{
+ int ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+
+ /* Suspend execution of all jobs, and go to inactive state */
+ mali_executor_suspend();
+
+ if (os_suspend) {
+ mali_control_timer_suspend(MALI_TRUE);
+ }
+
+ mali_pm_exec_lock();
+
+ ret = mali_pm_common_suspend();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+ MALI_IGNORE(ret);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ mali_pm_exec_lock();
+
+#if defined(DEBUG)
+ mali_pm_state_lock();
+
+ /* Assert that things are as we left them in os_suspend(). */
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+#endif
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /* Runtime PM was active, so reset PMU */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+ }
+
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ /* Start executing jobs again */
+ mali_executor_resume();
+}
+
+mali_bool mali_pm_runtime_suspend(void)
+{
+ mali_bool ret;
+
MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
- mali_group_power_off(MALI_TRUE);
- mali_power_on = MALI_FALSE;
+
+ mali_pm_exec_lock();
+
+ /*
+	 * Put the SW state directly into the "off" state, and do not bother
+	 * to power down each power domain, because the entire GPU will be
+	 * powered off when we return.
+	 * For runtime PM suspend, in contrast to OS suspend, there is a race
+	 * between this function and mali_pm_update_sync_internal(); this is
+	 * benign and is handled by the fallback below.
+ */
+ ret = mali_pm_common_suspend();
+ if (MALI_TRUE == ret) {
+ mali_pm_runtime_active = MALI_FALSE;
+ } else {
+ /*
+ * Process the "power up" instead,
+ * which could have been "lost"
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ return ret;
}
void mali_pm_runtime_resume(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- mali_bool do_reset = MALI_FALSE;
- MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+ mali_pm_exec_lock();
- if (MALI_TRUE != mali_power_on) {
- do_reset = MALI_TRUE;
- }
+ mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+ ++num_pm_runtime_resume;
+
+ mali_pm_state_lock();
+
+ /*
+ * Assert that things are as we left them in runtime_suspend(),
+ * except for pd_mask_wanted which normally will be the reason we
+ * got here (job queued => domains wanted)
+ */
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ mali_pm_state_unlock();
+#endif
if (NULL != pmu) {
mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
}
- mali_power_on = MALI_TRUE;
- _mali_osk_write_mem_barrier();
+ /*
+ * Normally we are resumed because a job has just been queued.
+ * pd_mask_wanted should thus be != 0.
+ * It is however possible for others to take a Mali Runtime PM ref
+ * without having a job queued.
+ * We should however always call mali_pm_update_sync_internal(),
+ * because this will take care of any potential mismatch between
+ * pmu_mask_current and pd_mask_current.
+ */
+ mali_pm_update_sync_internal();
+
+ mali_pm_exec_unlock();
+}
- if (do_reset) {
- mali_pm_reset_gpu();
- mali_group_power_on();
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPower domain: id %u\n",
+ mali_pm_domain_get_id(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tMask: 0x%04x\n",
+ mali_pm_domain_get_mask(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tUse count: %u\n",
+ mali_pm_domain_get_use_count(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tCurrent power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+ "On" : "Off");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tWanted power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+ "On" : "Off");
+
+ return n;
+}
+#endif
+
+static void mali_pm_state_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+ _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+ _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_up,
+ struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_up)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_up_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_up_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_up);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+ MALI_DEBUG_ASSERT(0 == *num_groups_up);
+ MALI_DEBUG_ASSERT_POINTER(l2_up);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+ MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering up domains: . [%s]\n",
+ mali_pm_mask_to_string(power_up_mask)));
+
+ pd_mask_current |= power_up_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(
+ domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered up */
+ mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+ /*
+ * Make a note of the L2 and/or group(s) to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(
+ domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_up <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_up[*num_l2_up] = l2_cache;
+ (*num_l2_up)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_up <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_up[*num_groups_up] = group;
+
+ (*num_groups_up)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
}
}
+static void mali_pm_domain_power_down(u32 power_down_mask,
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_down,
+ struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_down)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_down_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_down_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_down);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+ MALI_DEBUG_ASSERT(0 == *num_groups_down);
+ MALI_DEBUG_ASSERT_POINTER(l2_down);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+ MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering down domains: [%s]\n",
+ mali_pm_mask_to_string(power_down_mask)));
+
+ pd_mask_current &= ~power_down_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered down */
+ mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+ /*
+ * Make a note of the L2s and/or groups to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_down <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_down[*num_l2_down] = l2_cache;
+ (*num_l2_down)++;
+ }
-void mali_pm_set_power_is_on(void)
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_down <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_down[*num_groups_down] = group;
+ (*num_groups_down)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+
+/*
+ * Execute pending power domain changes
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+ /*
+ * This should only be called in non-atomic context
+ * (normally as deferred work)
+ *
+ * Look at the pending power domain changes, and execute these.
+ * Make sure group and schedulers are notified about changes.
+ */
+
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ u32 power_down_mask;
+ u32 power_up_mask;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+ ++num_pm_updates;
+#endif
+
+ /* Hold PM state lock while we look at (and obey) the wanted state */
+ mali_pm_state_lock();
+
+ MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ /* Figure out which cores we need to power on */
+ power_up_mask = pd_mask_wanted &
+ (pd_mask_wanted ^ pd_mask_current);
+
+ if (0 != power_up_mask) {
+ u32 power_up_mask_pmu;
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_up = 0;
+ struct mali_l2_cache_core *
+ l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_up = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_up;
+#endif
+
+ /*
+ * Make sure dummy/global domain is always included when
+ * powering up, since this is controlled by runtime PM,
+ * and device power is on at this stage.
+ */
+ power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* Power up only real PMU domains */
+ power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* But not those that happen to be powered on already */
+ power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+ power_up_mask;
+
+ if (0 != power_up_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current |= power_up_mask_pmu;
+ mali_pmu_power_up(pmu, power_up_mask_pmu);
+ }
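+		/*
+		 * Example (ignoring the dummy bit): power_up_mask 0x6 with
+		 * pmu_mask_current 0x2 yields power_up_mask_pmu 0x4 above;
+		 * only the domain not already powered on is sent to the PMU.
+		 */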
+
+ /*
+ * Put the domains themselves in power up state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_up(power_up_mask,
+ groups_up, &num_groups_up,
+ l2_up, &num_l2_up);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+	/* Notify each L2 cache that it has been powered up */
+ for (i = 0; i < num_l2_up; i++) {
+ mali_l2_cache_power_up(l2_up[i]);
+ }
+
+ /*
+ * Tell execution module about all the groups we have
+ * powered up. Groups will be notified as a result of this.
+ */
+ mali_executor_group_power_up(groups_up, num_groups_up);
+
+ /* Lock state again before checking for power down */
+ mali_pm_state_lock();
+ }
+
+ /* Figure out which cores we need to power off */
+ power_down_mask = pd_mask_current &
+ (pd_mask_wanted ^ pd_mask_current);
+
+ /*
+ * Never power down the dummy/global domain here. This is to be done
+	 * from a suspend request (since this domain is only physically powered
+ * down at that point)
+ */
+ power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ if (0 != power_down_mask) {
+ u32 power_down_mask_pmu;
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_down;
+#endif
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(power_down_mask,
+ groups_down, &num_groups_down,
+ l2_down, &num_l2_down);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+
+ }
+ } else {
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ u32 power_down_mask_pmu;
+
+ /* No need for state lock since we'll only update PMU */
+ mali_pm_state_unlock();
+
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+ }
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+}
+
+static mali_bool mali_pm_common_suspend(void)
+{
+ mali_pm_state_lock();
+
+ if (0 != pd_mask_wanted) {
+ MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+ mali_pm_state_unlock();
+ return MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ if (0 != pd_mask_current) {
+ /*
+		 * We still have some domains powered on.
+ * It is for instance very normal that at least the
+ * dummy/global domain is marked as powered on at this point.
+ * (because it is physically powered on until this function
+ * returns)
+ */
+
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(pd_mask_current,
+ groups_down,
+ &num_groups_down,
+ l2_down,
+ &num_l2_down);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ pmu_mask_current = 0;
+ } else {
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+ MALI_IGNORE(data);
+ mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+ int i;
+
+ /* Create all domains (including dummy domain) */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == domain_config[i]) continue;
+
+ if (NULL == mali_pm_domain_create(domain_config[i])) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+ MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+ /* GP core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_GP, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+ }
+
+ /* PP0 - PP3 core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP0, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP1, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP2, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP3, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+ }
+ }
+
+ /* PP4 - PP7 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP4, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP5, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP6, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP7, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+ }
+
+ /* L2gp/L2PP0/L2PP4 */
+ if (mali_is_mali400()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI400_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+ }
+ } else if (mali_is_mali450()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE2, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+ }
+ }
+}
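+
+/*
+ * Summary of the default PMU mask bits assigned above:
+ *
+ *                 Mali-400     Mali-450
+ *   GP            bit 0        bit 0
+ *   L2 cache 0    bit 1        bit 0
+ *   PP0           bit 2        bit 1
+ *   PP1-PP3       bits 3-5     bit 2
+ *   PP4-PP7       n/a          bit 3
+ *   L2 cache 1/2  n/a          bit 1 / bit 3
+ */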
+
+static u32 mali_pm_get_registered_cores_mask(void)
{
- mali_power_on = MALI_TRUE;
+ int i = 0;
+ u32 mask = 0;
+
+ for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+ mask |= domain_config[i];
+ }
+
+ return mask;
}
-mali_bool mali_pm_is_power_on(void)
+static void mali_pm_set_pmu_domain_config(void)
{
- return mali_power_on;
+ int i = 0;
+
+ _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (0 != domain_config[i]) {
+ break;
+ }
+ }
+
+ if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+ mali_pm_set_default_pm_domain_config();
+ }
+
+ /* Can't override dummy domain mask */
+ domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+ 1 << MALI_DOMAIN_INDEX_DUMMY;
+}
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+ int bit;
+ int str_pos = 0;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+ if (mask & (1 << bit)) {
+ bit_str[str_pos] = 'X';
+ } else {
+ bit_str[str_pos] = '-';
+ }
+ str_pos++;
+ }
+
+ bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+ return bit_str;
+}
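+
+/*
+ * Example: with MALI_MAX_NUMBER_OF_DOMAINS == 13, a mask of 0x1005
+ * (dummy bit 12, bit 2 and bit 0 set) renders as "X---------X-X";
+ * the leftmost character corresponds to the highest domain bit.
+ */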
+
+const char *mali_pm_group_stats_to_string(void)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+ u32 num_groups = mali_group_get_glob_num_groups();
+ u32 i;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(i);
+
+ if (MALI_TRUE == mali_group_power_is_on(group)) {
+ bit_str[i] = 'X';
+ } else {
+ bit_str[i] = '-';
+ }
+ }
+
+ bit_str[i] = '\0';
+
+ return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask
+ * cost is the total power cost of cores which will be powered on given this mask
+ */
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+ u32 i;
+
+ /* loop through all cores */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (!(domain_config[i] & mask)) {
+ continue;
+ }
+
+ switch (i) {
+ case MALI_DOMAIN_INDEX_GP:
+ *cost += MALI_GP_COST;
+
+ break;
+ case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP3:
+ if (mali_is_mali400()) {
+ if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L20])) {
+ *num_pp += 1;
+ }
+ } else {
+ if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L21])) {
+ *num_pp += 1;
+ }
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP7:
+ MALI_DEBUG_ASSERT(mali_is_mali450());
+
+ if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L22])) {
+ *num_pp += 1;
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_L20: /* Fall through */
+ case MALI_DOMAIN_INDEX_L21: /* Fall through */
+ case MALI_DOMAIN_INDEX_L22:
+ *cost += MALI_L2_COST;
+
+ break;
+ }
+ }
+}
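+
+/*
+ * Worked example, assuming the Mali-400 defaults from
+ * mali_pm_set_default_pm_domain_config(): for mask 0x7 (bits 0-2) the
+ * GP (bit 0), L2 (bit 1) and PP0 (bit 2) domains are all selected, so
+ * *num_pp becomes 1 and *cost becomes
+ * MALI_GP_COST + MALI_L2_COST + MALI_PP_COST = 3 + 1 + 6 = 10.
+ */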
+
+void mali_pm_power_cost_setup(void)
+{
+ /*
+	 * Two parallel arrays which store the best domain mask and its cost.
+	 * The index is the number of PP cores minus one; e.g. index 0 is for
+	 * the 1-PP option, which might have mask 0x2 at a cost of 1. A lower
+	 * cost is better.
+ */
+ u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+	/* cores_in_domain stores the number of PP cores in each PM domain. */
+ u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	/* max_domain_mask/max_domain_id track the highest PM domain in use. */
+ u32 max_domain_mask = 0;
+ u32 max_domain_id = 0;
+ u32 always_on_pp_cores = 0;
+
+ u32 num_pp, cost, mask;
+ u32 i, j , k;
+
+ /* Initialize statistics */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+ best_mask[i] = 0;
+ best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ mali_pm_domain_power_cost_result[i][j] = 0;
+ }
+ }
+
+	/* Scan the PP domain configs for the max domain mask and the always-on core count. */
+ for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+ if (0 < domain_config[i]) {
+			/* Get the max domain mask value used to calculate the
+			 * power cost; always-on PP cores are not counted. */
+ if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+ && max_domain_mask < domain_config[i]) {
+ max_domain_mask = domain_config[i];
+ }
+
+ if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+ always_on_pp_cores++;
+ }
+ }
+ }
+ max_domain_id = _mali_osk_fls(max_domain_mask);
+
+ /*
+ * Try all combinations of power domains and check how many PP cores
+ * they have and their power cost.
+ */
+ for (mask = 0; mask < (1 << max_domain_id); mask++) {
+ num_pp = 0;
+ cost = 0;
+
+ mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+		/* This mask can serve anything from MP1 up to num_pp PP cores; update the statistics for each */
+ for (i = 0; i < num_pp; i++) {
+ if (best_cost[i] >= cost) {
+ best_cost[i] = cost;
+ best_mask[i] = mask;
+ }
+ }
+ }
+
+ /*
+	 * If we want to enable x PP cores and x is less than the number of
+	 * always-on PP cores, all of the cores we enable must be always-on cores.
+ */
+ for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+ if (i < always_on_pp_cores) {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = i + 1;
+ } else {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = always_on_pp_cores;
+ }
+ }
+
+	/* In this loop, i represents the number of non-always-on PP cores we want to enable. */
+ for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+ if (best_mask[i] == 0) {
+ /* This MP variant is not available */
+ continue;
+ }
+
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ cores_in_domain[j] = 0;
+ }
+
+ for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+			if (0 < domain_config[j]
+			    && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) {
+ cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+ }
+ }
+
+		/* In this loop, j counts how many cores we have already assigned. */
+ for (j = 0; j <= i;) {
+			/* k visits every domain to consume the PP cores remaining in it. */
+ for (k = 0; k < max_domain_id; k++) {
+				/* If domain k is enabled in best_mask[i] and still
+				 * has PP cores left, we must pick at least one core
+				 * from it, then move on to the next enabled PM domain. */
+ if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+ cores_in_domain[k]--;
+ mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+ j++;
+ if (j > i) {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * When core scaling is in progress, this function returns, per PM domain,
+ * the number of PP cores to enable so that the requested core count is
+ * reached at the lowest power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+ MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+ _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
}
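+
+/*
+ * Usage sketch (illustrative): a core-scaling policy that wants 2 PP
+ * cores enabled would do
+ *
+ *   int cores_per_domain[MALI_MAX_NUMBER_OF_DOMAINS];
+ *   mali_pm_get_best_power_cost_mask(2, cores_per_domain);
+ *
+ * and then enable, per PMU domain bit, the number of PP cores stored in
+ * cores_per_domain[]; the last entry counts always-on cores.
+ */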
#define __MALI_PM_H__
#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#define MALI_DOMAIN_INDEX_GP 0
+#define MALI_DOMAIN_INDEX_PP0 1
+#define MALI_DOMAIN_INDEX_PP1 2
+#define MALI_DOMAIN_INDEX_PP2 3
+#define MALI_DOMAIN_INDEX_PP3 4
+#define MALI_DOMAIN_INDEX_PP4 5
+#define MALI_DOMAIN_INDEX_PP5 6
+#define MALI_DOMAIN_INDEX_PP6 7
+#define MALI_DOMAIN_INDEX_PP7 8
+#define MALI_DOMAIN_INDEX_L20 9
+#define MALI_DOMAIN_INDEX_L21 10
+#define MALI_DOMAIN_INDEX_L22 11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU, or always-on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY 12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * The PM module covers the Mali PM core, the PM domains and the Mali PMU.
+ */
_mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
void mali_pm_terminate(void);
-/* Callback functions registered for the runtime PMM system */
-void mali_pm_os_suspend(void);
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
void mali_pm_os_resume(void);
-void mali_pm_runtime_suspend(void);
+
+mali_bool mali_pm_runtime_suspend(void);
void mali_pm_runtime_resume(void);
-void mali_pm_set_power_is_on(void);
-mali_bool mali_pm_is_power_on(void);
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
#endif /* __MALI_PM_H__ */
#include "mali_pm_domain.h"
#include "mali_pmu.h"
#include "mali_group.h"
+#include "mali_pm.h"
-static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
-static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+void mali_pm_domain_initialize(void)
{
- _mali_osk_spinlock_irq_lock(domain->lock);
+ /* Domains will be initialized/created on demand */
}
-static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+void mali_pm_domain_terminate(void)
{
- _mali_osk_spinlock_irq_unlock(domain->lock);
-}
+ int i;
-MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
-{
- domain->state = state;
+	/* Delete all domains that have been created */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ mali_pm_domains[i] = NULL;
+ }
}
struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
domain = mali_pm_domain_get_from_mask(pmu_mask);
if (NULL != domain) return domain;
- MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+ MALI_DEBUG_PRINT(2,
+ ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+ pmu_mask));
- domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(
+ sizeof(struct mali_pm_domain));
if (NULL != domain) {
- domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
- if (NULL == domain->lock) {
- _mali_osk_free(domain);
- return NULL;
- }
-
- domain->state = MALI_PM_DOMAIN_ON;
+ domain->power_is_on = MALI_FALSE;
domain->pmu_mask = pmu_mask;
domain->use_count = 0;
- domain->group_list = NULL;
- domain->group_count = 0;
- domain->l2 = NULL;
+ _mali_osk_list_init(&domain->group_list);
+ _mali_osk_list_init(&domain->l2_cache_list);
domain_id = _mali_osk_fls(pmu_mask) - 1;
/* Verify the domain_id */
if (NULL == domain) {
return;
}
- _mali_osk_spinlock_irq_term(domain->lock);
-
- _mali_osk_free(domain);
-}
-void mali_pm_domain_terminate(void)
-{
- int i;
+ _mali_osk_list_delinit(&domain->group_list);
+ _mali_osk_list_delinit(&domain->l2_cache_list);
- /* Delete all domains */
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- mali_pm_domain_delete(mali_pm_domains[i]);
- }
+ _mali_osk_free(domain);
}
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group)
{
- struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
- struct mali_group *next;
-
- if (NULL == domain) return;
-
+ MALI_DEBUG_ASSERT_POINTER(domain);
MALI_DEBUG_ASSERT_POINTER(group);
- ++domain->group_count;
- next = domain->group_list;
-
- domain->group_list = group;
-
- group->pm_domain_list = next;
-
- mali_group_set_pm_domain(group, domain);
-
- /* Get pm domain ref after mali_group_set_pm_domain */
- mali_group_get_pm_domain_ref(group);
+ /*
+	 * Use addtail because the virtual group is created last and it needs
+	 * to be at the end of the list (in order to be activated after
+	 * all children).
+ */
+ _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
}
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache)
{
- struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
-
- if (NULL == domain) return;
-
- MALI_DEBUG_ASSERT(NULL == domain->l2);
- MALI_DEBUG_ASSERT(NULL != l2);
-
- domain->l2 = l2;
-
- mali_l2_cache_set_pm_domain(l2, domain);
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(l2_cache);
+ _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
}
struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
{
u32 id = 0;
- if (0 == mask) return NULL;
+ if (0 == mask) {
+ return NULL;
+ }
id = _mali_osk_fls(mask) - 1;
return mali_pm_domains[id];
}
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
{
- if (NULL == domain) return;
+ MALI_DEBUG_ASSERT_POINTER(domain);
- mali_pm_domain_lock(domain);
- ++domain->use_count;
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_get_async();
+ }
- if (MALI_PM_DOMAIN_ON != domain->state) {
- /* Power on */
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ ++domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
- MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+ /* Return our mask so caller can check this against wanted mask */
+ return domain->pmu_mask;
+}
- if (NULL != pmu) {
- _mali_osk_errcode_t err;
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
- err = mali_pmu_power_up(pmu, domain->pmu_mask);
+ --domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
- if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
- MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
- domain->pmu_mask));
- }
- }
- mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
- } else {
- MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_put();
}
- mali_pm_domain_unlock(domain);
+ /*
+	 * Return the PMU mask which could now be powered down
+ * (the bit for this domain).
+ * This is the responsibility of the caller (mali_pm)
+ */
+ return (0 == domain->use_count ? domain->pmu_mask : 0);
}
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
{
- if (NULL == domain) return;
-
- mali_pm_domain_lock(domain);
- --domain->use_count;
-
- if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
- /* Power off */
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+ u32 id = 0;
- mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
- if (NULL != pmu) {
- _mali_osk_errcode_t err;
+ id = _mali_osk_fls(domain->pmu_mask) - 1;
- err = mali_pmu_power_down(pmu, domain->pmu_mask);
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+ /* Verify that we have stored the domain at right id/index */
+ MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
- if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
- MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
- domain->pmu_mask));
- }
- }
- }
- mali_pm_domain_unlock(domain);
+ return id;
}
+#endif
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
{
- mali_bool is_powered = MALI_TRUE;
+ int i;
- /* Take a reference without powering on */
- if (NULL != domain) {
- mali_pm_domain_lock(domain);
- ++domain->use_count;
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (NULL == mali_pm_domains[i]) {
+ /* Nothing to check */
+ continue;
+ }
- if (MALI_PM_DOMAIN_ON != domain->state) {
- is_powered = MALI_FALSE;
+ if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
}
- mali_pm_domain_unlock(domain);
- }
- if (!_mali_osk_pm_dev_ref_add_no_power_on()) {
- is_powered = MALI_FALSE;
+ if (0 != mali_pm_domains[i]->use_count) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
}
- return is_powered;
-}
-
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
-{
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- if (NULL != domain) {
- mali_pm_domain_ref_put(domain);
- }
+ return MALI_TRUE;
}
+#endif
#include "mali_group.h"
#include "mali_pmu.h"
-typedef enum {
- MALI_PM_DOMAIN_ON,
- MALI_PM_DOMAIN_OFF,
-} mali_pm_domain_state;
-
+/* Instances are protected by PM state lock */
struct mali_pm_domain {
- mali_pm_domain_state state;
- _mali_osk_spinlock_irq_t *lock;
-
+ mali_bool power_is_on;
s32 use_count;
-
u32 pmu_mask;
- int group_count;
- struct mali_group *group_list;
+ /* Zero or more groups can belong to this domain */
+ _mali_osk_list_t group_list;
- struct mali_l2_cache_core *l2;
+ /* Zero or more L2 caches can belong to this domain */
+ _mali_osk_list_t l2_cache_list;
};
-struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group);
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
void mali_pm_domain_delete(struct mali_pm_domain *domain);
-void mali_pm_domain_terminate(void);
+void mali_pm_domain_add_l2_cache(
+ struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group);
-/** Get PM domain from domain ID
- */
struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
/* Ref counting */
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->power_is_on;
+}
-MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+ struct mali_pm_domain *domain,
+ mali_bool power_is_on)
{
- return domain->l2;
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ domain->power_is_on = power_is_on;
}
-MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+ struct mali_pm_domain *domain)
{
- return domain->state;
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->use_count;
}
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->pmu_mask;
+}
+#endif
-#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
- NULL != (group); (group) = (group)->pm_domain_list)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
#endif /* __MALI_PM_DOMAIN_H__ */
#include "mali_pm.h"
#include "mali_osk_mali.h"
-u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {0};
+struct mali_pmu_core *mali_global_pmu_core = NULL;
-static u32 mali_pmu_detect_mask(void);
-
-/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
- */
-struct mali_pmu_core {
- struct mali_hw_core hw_core;
- _mali_osk_spinlock_t *lock;
- u32 registered_cores_mask;
- u32 active_cores_mask;
- u32 switch_delay;
-};
-
-static struct mali_pmu_core *mali_global_pmu_core = NULL;
-
-/** @brief Register layout for hardware PMU
- */
-typedef enum {
- PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
- PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
- PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
- PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
- PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
- PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
- PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
- PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
-} pmu_reg_addr_mgmt_addr;
-
-#define PMU_REG_VAL_IRQ 1
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu);
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
{
MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
- pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+ sizeof(struct mali_pmu_core));
if (NULL != pmu) {
- pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
- if (NULL != pmu->lock) {
- pmu->registered_cores_mask = mali_pmu_detect_mask();
- pmu->active_cores_mask = pmu->registered_cores_mask;
-
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
- _mali_osk_errcode_t err;
- _mali_osk_device_data data = { 0, };
-
- err = _mali_osk_device_data_get(&data);
- if (_MALI_OSK_ERR_OK == err) {
- pmu->switch_delay = data.pmu_switch_delay;
- mali_global_pmu_core = pmu;
- return pmu;
- }
- mali_hw_core_delete(&pmu->hw_core);
- }
- _mali_osk_spinlock_term(pmu->lock);
+ pmu->registered_cores_mask = 0; /* to be set later */
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+ resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+ pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+ mali_global_pmu_core = pmu;
+
+ return pmu;
}
_mali_osk_free(pmu);
}
{
MALI_DEBUG_ASSERT_POINTER(pmu);
MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
- _mali_osk_spinlock_term(pmu->lock);
+ mali_global_pmu_core = NULL;
+
mali_hw_core_delete(&pmu->hw_core);
_mali_osk_free(pmu);
- mali_global_pmu_core = NULL;
}
-static void mali_pmu_lock(struct mali_pmu_core *pmu)
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
{
- _mali_osk_spinlock_lock(pmu->lock);
+ pmu->registered_cores_mask = mask;
}
-static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
{
- _mali_osk_spinlock_unlock(pmu->lock);
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Setup the desired defaults */
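+ /*
+ * Writing 0 to the interrupt mask presumably disables PMU interrupt
+ * delivery; command completion is detected by polling the raw
+ * status register instead (see mali_pmu_wait_for_command_finish()).
+ */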
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
}
-static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
{
- u32 rawstat;
- u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+ u32 stat;
- MALI_DEBUG_ASSERT(pmu);
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
- /* Wait for the command to complete */
- do {
- rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
- --timeout;
- } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+ mali_pm_exec_lock();
- MALI_DEBUG_ASSERT(0 < timeout);
- if (0 == timeout) {
- return _MALI_OSK_ERR_TIMEOUT;
- }
+ mali_pmu_reset(pmu);
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ /* Now simply power up the domains which are marked as powered down */
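+ /*
+ * Note: a set bit in PMU_REG_ADDR_MGMT_STATUS means the domain is
+ * powered down, so the status value read here is exactly the mask
+ * of domains that still need powering up.
+ */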
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_up(pmu, stat);
- return _MALI_OSK_ERR_OK;
+ mali_pm_exec_unlock();
}
-static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
{
u32 stat;
- u32 active_mask;
- u32 mask_ck;
- u32 swt_dly;
- u32 xxd = 1;
-
- _mali_osk_errcode_t err;
-#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
- u32 current_domain;
-#endif
MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
- & PMU_REG_VAL_IRQ));
-
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
- if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
-
-#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);
-
- err = mali_pmu_wait_for_command_finish(pmu);
- if (_MALI_OSK_ERR_OK != err) {
- return err;
- }
-#else
- active_mask = mask & stat;
- mask_ck = active_mask;
- swt_dly = 0xfff;
- for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
- if (current_domain & active_mask) {
- if (mask_ck == 1) {
- swt_dly = pmu->switch_delay;
- xxd = 0;
- }
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, swt_dly);
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);
-
- err = mali_pmu_wait_for_command_finish(pmu);
- if (_MALI_OSK_ERR_OK != err) {
- return err;
- }
- }
- mask_ck = mask_ck >> 1;
- }
- if (xxd != 0) {
- printk("@@@@ warn\n");
- printk("mask_ck:%d,active_mask:%d\n", mask_ck, active_mask);
- //panic(0);
- }
- if (swt_dly != pmu->switch_delay)
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-#endif
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-#if defined(DEBUG)
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
+ mali_pm_exec_lock();
- MALI_DEBUG_ASSERT(0 == (stat & mask));
- MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
-#endif /* defined(DEBUG) */
+ /* Now simply power down the domains which are marked as powered up */
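+ /*
+ * Status bits are set for domains that are already powered down, so
+ * inverting the status and masking it with the registered set gives
+ * the domains that are currently powered up.
+ */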
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
- return _MALI_OSK_ERR_OK;
+ mali_pm_exec_unlock();
}
-static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
{
u32 stat;
_mali_osk_errcode_t err;
MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
- & PMU_REG_VAL_IRQ));
-
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power down: ...................... [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+
+ /*
+ * Assert that we are not powering down domains which are already
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
- /* Do not wait for interrupt on Mali-300/400 if all domains are powered off
- * by our power down command, because the HW will simply not generate an
- * interrupt in this case.*/
+ /*
+ * Do not wait for interrupt on Mali-300/400 if all domains are
+ * powered off by our power down command, because the HW will simply
+ * not generate an interrupt in this case.
+ */
if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
err = mali_pmu_wait_for_command_finish(pmu);
if (_MALI_OSK_ERR_OK != err) {
return err;
}
} else {
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
- }
-#if defined(DEBUG)
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
-
- //MALI_DEBUG_ASSERT(mask == (stat & mask));
-#endif
-
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
- u32 cores_off_mask, cores_on_mask, stat;
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-
- cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
- cores_on_mask = pmu->registered_cores_mask & (stat & pmu->active_cores_mask);
-
- if (0 != cores_off_mask) {
- err = mali_pmu_power_down_internal(pmu, cores_off_mask);
- if (_MALI_OSK_ERR_OK != err) return err;
- }
-
- if (0 != cores_on_mask) {
- err = mali_pmu_power_up_internal(pmu, cores_on_mask);
- if (_MALI_OSK_ERR_OK != err) return err;
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
}
#if defined(DEBUG)
- {
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
-
- MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
- }
-#endif /* defined(DEBUG) */
-
- mali_pmu_unlock(pmu);
+ /* Verify power status of domains after power down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- /* Make sure we have a valid power domain mask */
- if (mask > pmu->registered_cores_mask) {
- return _MALI_OSK_ERR_INVALID_ARGS;
- }
-
- mali_pmu_lock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
-
- pmu->active_cores_mask &= ~mask;
-
- _mali_osk_pm_dev_ref_add_no_power_on();
- if (!mali_pm_is_power_on()) {
- /* Don't touch hardware if all of Mali is powered off. */
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
-
- return _MALI_OSK_ERR_BUSY;
- }
-
- err = mali_pmu_power_down_internal(pmu, mask);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
{
+ u32 stat;
_mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ u32 current_domain;
+#endif
MALI_DEBUG_ASSERT_POINTER(pmu);
MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
- /* Make sure we have a valid power domain mask */
- if (mask & ~pmu->registered_cores_mask) {
- return _MALI_OSK_ERR_INVALID_ARGS;
- }
-
- mali_pmu_lock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
+ MALI_DEBUG_PRINT(3,
+ ("PMU power up: ........................ [%s]\n",
+ mali_pm_mask_to_string(mask)));
- pmu->active_cores_mask |= mask;
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+ if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
- _mali_osk_pm_dev_ref_add_no_power_on();
- if (!mali_pm_is_power_on()) {
- /* Don't touch hardware if all of Mali is powered off. */
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
+ /*
+ * Assert that we are only powering up domains which are currently
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
- MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP, mask);
- return _MALI_OSK_ERR_BUSY;
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
}
+#else
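+ /*
+ * Power up one domain at a time, waiting for each command to
+ * finish, presumably to avoid the current peaks that powering
+ * several domains simultaneously could cause.
+ */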
+ for (current_domain = 1;
+ current_domain <= pmu->registered_cores_mask;
+ current_domain <<= 1) {
+ if (current_domain & mask & stat) {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP,
+ current_domain);
- err = mali_pmu_power_up_internal(pmu, mask);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults in case we were called before mali_pmu_reset() */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- err = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);
-
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults in case we were called before mali_pmu_reset() */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+ }
+#endif
- mali_pmu_unlock(pmu);
- return err;
-}
+#if defined(DEBUG)
+ /* Verify power status of domains after power up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+#endif /* defined(DEBUG) */
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
-{
- return mali_global_pmu_core;
+ return _MALI_OSK_ERR_OK;
}
-static u32 mali_pmu_detect_mask(void)
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu)
{
- int dynamic_config_pp = 0;
- int dynamic_config_l2 = 0;
- int i = 0;
- u32 mask = 0;
-
- /* Check if PM domain compatible with actually pp core and l2 cache and collection info about domain */
- mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
- for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
- mask |= mali_pmu_get_domain_mask(i);
+ MALI_DEBUG_ASSERT(pmu);
- if (0x0 != mali_pmu_get_domain_mask(i)) {
- dynamic_config_pp++;
- }
- }
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
- for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
- mask |= mali_pmu_get_domain_mask(i);
+ MALI_DEBUG_ASSERT(0 < timeout);
- if (0x0 != mali_pmu_get_domain_mask(i)) {
- dynamic_config_l2++;
- }
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
}
- MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
- return mask;
-}
-
-/*
- *
- * kasin.li@amlogic.com.
- **/
-
-u32 mali_pmu_get_status(void)
-{
- u32 ret;
- MALI_DEBUG_ASSERT_POINTER(mali_global_pmu_core);
- mali_pmu_lock(mali_global_pmu_core);
- ret = mali_hw_core_register_read(&mali_global_pmu_core->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- mali_pmu_unlock(mali_global_pmu_core);
- return ret;
-
+ return _MALI_OSK_ERR_OK;
}
#define __MALI_PMU_H__
#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
-#define MALI_GP_DOMAIN_INDEX 0
-#define MALI_PP0_DOMAIN_INDEX 1
-#define MALI_PP1_DOMAIN_INDEX 2
-#define MALI_PP2_DOMAIN_INDEX 3
-#define MALI_PP3_DOMAIN_INDEX 4
-#define MALI_PP4_DOMAIN_INDEX 5
-#define MALI_PP5_DOMAIN_INDEX 6
-#define MALI_PP6_DOMAIN_INDEX 7
-#define MALI_PP7_DOMAIN_INDEX 8
-#define MALI_L20_DOMAIN_INDEX 9
-#define MALI_L21_DOMAIN_INDEX 10
-#define MALI_L22_DOMAIN_INDEX 11
-
-#define MALI_MAX_NUMBER_OF_DOMAINS 12
-
-/* Record the domain config from the customer or default config */
-extern u16 mali_pmu_global_domain_config[];
-
-static inline u16 mali_pmu_get_domain_mask(u32 index)
-{
- MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
-
- return mali_pmu_global_domain_config[index];
-}
-
-static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
-{
- MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+/** @brief Mali in-built PMU hardware info; the PMU hardware knows the power mask of the cores
+ */
+struct mali_pmu_core {
+ struct mali_hw_core hw_core;
+ u32 registered_cores_mask;
+ u32 switch_delay;
+};
- mali_pmu_global_domain_config[index] = value;
-}
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
-static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
-{
- _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
-}
+#define PMU_REG_VAL_IRQ 1
-struct mali_pmu_core;
+extern struct mali_pmu_core *mali_global_pmu_core;
/** @brief Initialisation of MALI PMU
*
*/
void mali_pmu_delete(struct mali_pmu_core *pmu);
-/** @brief Reset PMU core
+/** @brief Set registered cores mask
*
- * @param pmu Pointer to PMU core object to reset
- * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
*/
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * Called to power down the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_up.
+/** @brief Retrieves the Mali PMU core object (if any)
*
- * @param pmu Pointer to PMU core object to power down
- * @param mask Mask specifying which power domains to power down
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ * @return The Mali PMU object, or NULL if no PMU exists.
*/
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
-/** @brief MALI GPU power up using MALI in-built PMU
+/** @brief Reset PMU core
*
- * Called to power up the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_down.
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
*
- * @param pmu Pointer to PMU core object to power up
- * @param mask Mask specifying which power domains to power up
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ * @param pmu Pointer to PMU core object
*/
-_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+ u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ return ((~stat) & pmu->registered_cores_mask);
+}
/** @brief MALI GPU power down using MALI in-built PMU
*
- * called to power down all cores
+ * Called to power down the specified cores.
*
* @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
 * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
/** @brief MALI GPU power up using MALI in-built PMU
*
- * called to power up all cores
+ * Called to power up the specified cores.
*
* @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
 * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
-
-/** @brief Retrieves the Mali PMU core object (if any)
- *
- * @return The Mali PMU object, or NULL if no PMU exists.
- */
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
-/** @brief Retrieves the Mali Power Domain status.
- *
- * @return the Mali Power Domain status 1 off, 0 on.
- */
-extern u32 mali_pmu_get_status(void);
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
#endif /* __MALI_PMU_H__ */
#include "regs/mali_200_regs.h"
#include "mali_kernel_common.h"
#include "mali_kernel_core.h"
-#include "mali_dma.h"
#if defined(CONFIG_MALI400_PROFILING)
#include "mali_osk_profiling.h"
#endif
u32 rawstat = 0;
for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
- if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
break;
return mali_pp_reset_wait(core);
}
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
- mali_dma_cmd_buf *buf)
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
{
u32 relative_address;
u32 start_index;
MALI_DEBUG_ASSERT_POINTER(core);
- /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
- relative_address = MALI200_REG_ADDR_RSW;
- start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
- nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
-
- mali_dma_write_array_conditional(buf, &core->hw_core,
- relative_address, &frame_registers[start_index],
- nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
- /* MALI200_REG_ADDR_STACK_SIZE */
- relative_address = MALI200_REG_ADDR_STACK_SIZE;
- start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
-
- mali_dma_write_conditional(buf, &core->hw_core,
- relative_address, frame_registers[start_index],
- mali_frame_registers_reset_values[start_index]);
-
- /* Skip 2 reserved registers */
-
- /* Write remaining registers */
- relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
- start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
- nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
-
- mali_dma_write_array_conditional(buf, &core->hw_core,
- relative_address, &frame_registers[start_index],
- nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
- /* Write WBx registers */
- if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
-
- if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
-
- if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
+ /* Write frame registers */
- if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
- mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
- }
- if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
- mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
- }
-
- /* This is the command that starts the core.
- *
- * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just
- * force core to assert the completion interrupt.
+ /*
+ * There are two frame registers which are different for each sub job:
+ * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+ * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
*/
-#if !defined(PROFILING_SKIP_PP_JOBS)
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
-#else
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
-#endif
-}
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
-void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
-{
- u32 relative_address;
- u32 start_index;
- u32 nr_of_regs;
- u32 *frame_registers = mali_pp_job_get_frame_registers(job);
- u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
- u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
- u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
- u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
- u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
-
- MALI_DEBUG_ASSERT_POINTER(core);
+ /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+ if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+ }
/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
relative_address = MALI200_REG_ADDR_RSW;
#include "mali_osk.h"
#include "mali_pp_job.h"
#include "mali_hw_core.h"
-#include "mali_dma.h"
struct mali_group;
_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
-void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
-
-/**
- * @brief Add commands to DMA command buffer to start PP job on core.
- */
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
- mali_dma_cmd_buf *buf);
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
u32 mali_pp_core_get_version(struct mali_pp_core *core);
*/
void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
-MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
{
return core->hw_core.description;
}
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
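+/*
+ * Classify the pending interrupt state: no used IRQ bits set means no
+ * interrupt, END_OF_FRAME alone means the job completed successfully,
+ * and any other combination is reported as an error.
+ */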
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+ MALI200_REG_VAL_IRQ_MASK_USED;
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
}
-MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
}
-MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+
+MALI_STATIC_INLINE mali_bool mali_pp_is_active(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
}
-MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
-{
- mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
-}
-
MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
{
mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
}
-MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob)
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
{
- u32 addr = mali_pp_job_get_addr_stack(job, subjob);
+ u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
}
#include "mali_pp.h"
#include "mali_pp_job.h"
-#include "mali_dma.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
#include "linux/mali_memory_dma_buf.h"
#endif
_mali_osk_list_init(&job->list);
job->session = session;
- _mali_osk_list_init(&job->session_list);
job->id = id;
job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
job->pid = _mali_osk_get_pid();
job->tid = _mali_osk_get_tid();
- job->num_memory_cookies = job->uargs.num_memory_cookies;
- if (job->num_memory_cookies > 0) {
+ _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+ _mali_osk_atomic_init(&job->sub_job_errors, 0);
+
+ if (job->uargs.num_memory_cookies > 0) {
u32 size;
u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
goto fail;
}
- size = sizeof(*memory_cookies) * job->num_memory_cookies;
+ size = sizeof(*memory_cookies) * job->uargs.num_memory_cookies;
job->memory_cookies = _mali_osk_malloc(size);
if (NULL == job->memory_cookies) {
}
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
- job->num_dma_bufs = job->num_memory_cookies;
- job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
- if (NULL == job->dma_bufs) {
- MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
- goto fail;
+ if (0 < job->uargs.num_memory_cookies) {
+ job->dma_bufs = _mali_osk_calloc(job->uargs.num_memory_cookies,
+ sizeof(struct mali_dma_buf_attachment *));
+ if (NULL == job->dma_bufs) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+ goto fail;
+ }
}
#endif
}
- /* Prepare DMA command buffer to start job, if it is virtual. */
- if (mali_pp_job_is_virtual_group_job(job)) {
- struct mali_pp_core *core;
- _mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);
-
- if (_MALI_OSK_ERR_OK != err) {
- MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
- goto fail;
- }
-
- core = mali_pp_scheduler_get_virtual_pp();
- MALI_DEBUG_ASSERT_POINTER(core);
-
- mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
- }
-
if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
/* Not a valid job. */
goto fail;
void mali_pp_job_delete(struct mali_pp_job *job)
{
- mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
if (NULL != job->finished_notification) {
_mali_osk_notification_delete(job->finished_notification);
}
- _mali_osk_free(job->memory_cookies);
-
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
/* Unmap buffers attached to job */
- if (0 < job->num_dma_bufs) {
+ if (0 < job->uargs.num_memory_cookies) {
mali_dma_buf_unmap_job(job);
+ if (NULL != job->dma_bufs) {
+ _mali_osk_free(job->dma_bufs);
+ }
}
-
- _mali_osk_free(job->dma_bufs);
#endif /* CONFIG_DMA_SHARED_BUFFER */
+ if (NULL != job->memory_cookies) {
+ _mali_osk_free(job->memory_cookies);
+ }
+
+ _mali_osk_atomic_term(&job->sub_jobs_completed);
+ _mali_osk_atomic_term(&job->sub_job_errors);
+
_mali_osk_free(job);
}
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_pp_job *iter;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_pp_job, list) {
+ /* job should be started after iter if iter is in progress. */
+ if (0 < iter->sub_jobs_started) {
+ break;
+ }
+
+ /*
+ * job should be started after iter if it has a higher
+ * job id. A span is used to handle job id wrapping.
+ */
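+ /*
+ * The unsigned subtraction makes this wrap-safe: e.g.
+ * (u32)(0x00000001 - 0xFFFFFFFE) == 3, so an id allocated just
+ * after the counter wraps still compares as newer than one
+ * allocated just before the wrap.
+ */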
+ if ((mali_pp_job_get_id(job) -
+ mali_pp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN) {
+ break;
+ }
+ }
+
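+ /*
+ * If the loop falls through without breaking, iter ends up wrapping
+ * the queue head itself (the usual list_for_each_entry_reverse
+ * pattern), so the job is inserted at the front of the queue.
+ */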
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
{
	/* Virtual jobs always use the global job counter; the same applies when there are no per sub job counters at all */
- if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
return job->uargs.perf_counter_src0;
}
u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
{
	/* Virtual jobs always use the global job counter; the same applies when there are no per sub job counters at all */
- if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
/* Virtual jobs always use the global job counter */
return job->uargs.perf_counter_src1;
}
#include "mali_kernel_common.h"
#include "regs/mali_200_regs.h"
#include "mali_kernel_core.h"
-#include "mali_dma.h"
#include "mali_dlbu.h"
#include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
#include "linux/mali_memory_dma_buf.h"
#endif
/**
- * The structure represents a PP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems through-out
+ * its lifetime. Different part of the PP job struct is used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
*/
struct mali_pp_job {
- _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
- struct mali_session_data *session; /**< Session which submitted this job */
- _mali_osk_list_t session_list; /**< Used to link jobs together in the session job list */
- _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
_mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
- mali_dma_cmd_buf dma_cmd_buf; /**< Command buffer for starting job using Mali-450 DMA unit */
- u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
- u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
- u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
- u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
- u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
- u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
- u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
- u32 sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+ struct mali_session_data *session; /**< Session which submitted this job */
u32 pid; /**< Process ID of submitting process */
u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
_mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
- u32 num_memory_cookies; /**< Number of memory cookies attached to job */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays below which are != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+ /*
+ * These members are used by both scheduler and executor.
+ * They are "protected" by atomic operations.
+ */
+ _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+
+ /*
+ * These members are used by scheduler, but only when no one else
+ * knows about this job object but the working function.
+ * No lock is thus needed for these.
+ */
u32 *memory_cookies; /**< Memory cookies attached to job */
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
struct mali_dma_buf_attachment **dma_bufs; /**< Array of DMA-bufs used by job */
- u32 num_dma_bufs; /**< Number of DMA-bufs used by job */
#endif
- struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
- u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
- u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
- u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
};
void mali_pp_job_initialize(void);
MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->id;
}
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->cache_order;
}
MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.user_job_ptr;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_builder_id;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.flush_id;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->pid;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->tid;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.dlbu_registers;
}
-MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual_group_job(struct mali_pp_job *job)
-{
- if (mali_is_mali450()) {
- return 1 != job->uargs.num_cores;
- }
-
- return MALI_FALSE;
-}
-
-MALI_STATIC_INLINE mali_bool mali_pp_job_is_with_dlbu(struct mali_pp_job *job)
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
{
#if defined(CONFIG_MALI450)
- return 0 == job->uargs.num_cores;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
#else
return MALI_FALSE;
#endif
MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
{
- if (mali_pp_job_is_with_dlbu(job)) {
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (mali_pp_job_is_virtual(job)) {
return MALI_DLBU_VIRT_ADDR;
} else if (0 == sub_job) {
return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
if (0 == sub_job) {
return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
} else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
return 0;
}
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb0_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb1_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb2_registers;
}
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
return MALI_TRUE;
}
-MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
{
+ u32 fb_lookup_id;
+
MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+
+ MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
- return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+ _mali_osk_list_addtail(&job->session_fb_lookup_list,
+ &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
}
MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->session;
}
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
}
Makes sure that no new subjobs are started. */
MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
{
- u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
- job->sub_jobs_started += jobs_remaining;
- job->sub_jobs_completed += jobs_remaining;
- job->sub_job_errors += jobs_remaining;
-}
+ u32 jobs_remaining;
+ u32 i;
-MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
-{
- u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
- job->sub_jobs_started += jobs_remaining;
- job->sub_jobs_completed += jobs_remaining;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+
+ /* Not the most optimal way, but this is only used in error cases */
+ for (i = 0; i < jobs_remaining; i++) {
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
}
MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
{
- return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->sub_jobs_num ==
+ _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+ MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
return job->sub_jobs_started;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->sub_jobs_num;
}
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+ return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+ struct mali_pp_job *job, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+ return job->memory_cookies[index];
+}
+
MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
{
- MALI_DEBUG_ASSERT(job);
+ MALI_DEBUG_ASSERT_POINTER(job);
- if (0 != job->num_memory_cookies) {
+ if (0 < job->uargs.num_memory_cookies) {
return MALI_TRUE;
}
return MALI_FALSE;
}
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+MALI_STATIC_INLINE u32 mali_pp_job_num_dma_bufs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE struct mali_dma_buf_attachment *mali_pp_job_get_dma_buf(
+ struct mali_pp_job *job, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->dma_bufs);
+ return job->dma_bufs[index];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_dma_buf(struct mali_pp_job *job,
+ u32 index, struct mali_dma_buf_attachment *mem)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->dma_bufs);
+ job->dma_bufs[index] = mem;
+}
+#endif
+
MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
{
MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
/* Assert that we are marking the "first unstarted sub job" as started */
MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
{
- job->sub_jobs_completed++;
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
if (MALI_FALSE == success) {
- job->sub_job_errors++;
+ _mali_osk_atomic_inc(&job->sub_job_errors);
}
}
MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
{
- if (0 == job->sub_job_errors) {
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) {
return MALI_TRUE;
}
return MALI_FALSE;
}
-MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+ /*
+ * A pilot job is currently identified as a job which
+ * requires no callback notification.
+ */
+ return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
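+ /*
+ * Ownership of the notification object is transferred to the
+ * caller; the job's pointer is cleared so that mali_pp_job_delete()
+ * will not free it a second time.
+ */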
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+ struct mali_pp_job *job)
{
- return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+ ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_flag;
}
-
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value0[sub_job];
}
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value1[sub_job];
}
MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value0[sub_job] = value;
}
MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value1[sub_job] = value;
}
MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
{
- if (mali_pp_job_is_with_dlbu(job) && job->sub_jobs_num != 1) {
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
return _MALI_OSK_ERR_FAULT;
}
return _MALI_OSK_ERR_OK;
}
-/**
- * Returns MALI_TRUE if first job should be started after second job.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
-{
- MALI_DEBUG_ASSERT_POINTER(first);
- MALI_DEBUG_ASSERT_POINTER(second);
-
- /* First job should be started after second job if second job is in progress. */
- if (0 < second->sub_jobs_started) {
- return MALI_TRUE;
- }
-
- /* First job should be started after second job if first job has a higher job id. A span is
- used to handle job id wrapping. */
- if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
- return MALI_TRUE;
- }
-
- /* Second job should be started after first job. */
- return MALI_FALSE;
-}
-
/**
* Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
*
MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
{
MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
}
return &(job->tracker);
}
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
#endif /* __MALI_PP_JOB_H__ */
+++ /dev/null
-/*
- * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "mali_pp_scheduler.h"
-#include "mali_kernel_common.h"
-#include "mali_kernel_core.h"
-#include "mali_osk.h"
-#include "mali_osk_list.h"
-#include "mali_scheduler.h"
-#include "mali_pp.h"
-#include "mali_pp_job.h"
-#include "mali_group.h"
-#include "mali_pm.h"
-#include "mali_timeline.h"
-#include "mali_osk_profiling.h"
-#include "mali_kernel_utilization.h"
-#include "mali_session.h"
-#include "mali_pm_domain.h"
-#include "linux/mali/mali_utgard.h"
-
-#if defined(CONFIG_DMA_SHARED_BUFFER)
-#include "mali_memory_dma_buf.h"
-#endif
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
-#include <linux/sched.h>
-#include <trace/events/gpu.h>
-#endif
-
-/* Queue type used for physical and virtual job queues. */
-struct mali_pp_scheduler_job_queue {
- _MALI_OSK_LIST_HEAD(normal_pri); /* List of jobs with some unscheduled work. */
- _MALI_OSK_LIST_HEAD(high_pri); /* List of high priority jobs with some unscheduled work. */
- u32 depth; /* Depth of combined queues. */
-};
-
-/* If dma_buf with map on demand is used, we defer job deletion and job queue if in atomic context,
- * since both might sleep. */
-#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
-#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE 1
-#define MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE 1
-#endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
-
-static void mali_pp_scheduler_job_queued(void);
-static void mali_pp_scheduler_job_completed(mali_bool job_started);
-
-/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
-#define MALI_MAX_NUMBER_OF_PP_GROUPS 9
-
-static mali_bool mali_pp_scheduler_is_suspended(void *data);
-
-static u32 pp_version = 0;
-
-/* Physical job queue */
-static struct mali_pp_scheduler_job_queue job_queue;
-
-/* Physical groups */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); /* List of physical groups with working jobs on the pp core */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); /* List of physical groups with idle jobs on the pp core */
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); /* List of disabled physical groups */
-
-/* Virtual job queue (Mali-450 only) */
-static struct mali_pp_scheduler_job_queue virtual_group_job_queue;
-
-/**
- * Add job to scheduler queue.
- *
- * @param job Job to queue.
- * @return Schedule mask.
- */
-static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job);
-
-/* Virtual group (Mali-450 only) */
-static struct mali_group *virtual_group = NULL; /* Virtual group (if any) */
-static enum {
- VIRTUAL_GROUP_IDLE,
- VIRTUAL_GROUP_WORKING,
- VIRTUAL_GROUP_DISABLED,
-}
-virtual_group_state = VIRTUAL_GROUP_IDLE; /* Current state of the virtual group (idle, working or disabled) */
-
-/* Number of physical cores */
-static u32 num_cores = 0;
-
-/* Number of physical cores which are enabled */
-static u32 enabled_cores = 0;
-
-/* Enable or disable core scaling */
-static mali_bool core_scaling_enabled = MALI_TRUE;
-
-/* Variables to allow safe pausing of the scheduler */
-static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
-static u32 pause_count = 0;
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
-static _mali_osk_spinlock_irq_t *pp_scheduler_lock = NULL;
-#else
-static _mali_osk_spinlock_t *pp_scheduler_lock = NULL;
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-
-MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
-{
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- _mali_osk_spinlock_irq_lock(pp_scheduler_lock);
-#else
- _mali_osk_spinlock_lock(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
- MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken.\n"));
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
-{
- MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock.\n"));
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- _mali_osk_spinlock_irq_unlock(pp_scheduler_lock);
-#else
- _mali_osk_spinlock_unlock(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-#if defined(DEBUG)
-#define MALI_ASSERT_PP_SCHEDULER_LOCKED() MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock)
-#else
-#define MALI_ASSERT_PP_SCHEDULER_LOCKED() do {} while (0)
-#endif /* defined(DEBUG) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
-
-static _mali_osk_wq_work_t *pp_scheduler_wq_job_delete = NULL;
-static _mali_osk_spinlock_irq_t *pp_scheduler_job_delete_lock = NULL;
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_deletion_queue);
-
-static void mali_pp_scheduler_deferred_job_delete(struct mali_pp_job *job)
-{
- MALI_DEBUG_ASSERT_POINTER(job);
-
- _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
-
- /* This job object should not be on any lists. */
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
- _mali_osk_list_addtail(&job->list, &pp_scheduler_job_deletion_queue);
-
- _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
-
- _mali_osk_wq_schedule_work(pp_scheduler_wq_job_delete);
-}
-
-static void mali_pp_scheduler_do_job_delete(void *arg)
-{
- _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
- struct mali_pp_job *job;
- struct mali_pp_job *tmp;
-
- MALI_IGNORE(arg);
-
- _mali_osk_spinlock_irq_lock(pp_scheduler_job_delete_lock);
-
-	/*
-	 * Quickly "unhook" the jobs pending to be deleted, so we can release the lock before
-	 * we start deleting the job objects (without any locks held)
-	 */
- _mali_osk_list_move_list(&pp_scheduler_job_deletion_queue, &list);
-
- _mali_osk_spinlock_irq_unlock(pp_scheduler_job_delete_lock);
-
- _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
- mali_pp_job_delete(job); /* delete the job object itself */
- }
-}
-
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
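The deferral above exists because job deletion may sleep (dma_buf unmap, releasing sync refs) while job completion can run in atomic context. A compilable user-space model of the same unhook-then-drain pattern, with a pthread mutex standing in for the OSK spinlock:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *deletion_queue; /* stack of pending deletions */

/* Callable from "atomic" context: only links the object under a short lock. */
static void deferred_delete(struct node *n)
{
	pthread_mutex_lock(&queue_lock);
	n->next = deletion_queue;
	deletion_queue = n;
	pthread_mutex_unlock(&queue_lock);
	/* the driver would now schedule the work item */
}

/* Worker context: move the whole list out, then free with no locks held. */
static void do_deletes(void)
{
	struct node *list;

	pthread_mutex_lock(&queue_lock);
	list = deletion_queue; /* "unhook" everything at once */
	deletion_queue = NULL;
	pthread_mutex_unlock(&queue_lock);

	while (list) {
		struct node *next = list->next;
		free(list);
		list = next;
	}
}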
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
-
-static _mali_osk_wq_work_t *pp_scheduler_wq_job_queue = NULL;
-static _mali_osk_spinlock_irq_t *pp_scheduler_job_queue_lock = NULL;
-static _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_scheduler_job_queue_list);
-
-static void mali_pp_scheduler_deferred_job_queue(struct mali_pp_job *job)
-{
- MALI_DEBUG_ASSERT_POINTER(job);
-
- _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
- _mali_osk_list_addtail(&job->list, &pp_scheduler_job_queue_list);
- _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
-
- _mali_osk_wq_schedule_work(pp_scheduler_wq_job_queue);
-}
-
-static void mali_pp_scheduler_do_job_queue(void *arg)
-{
- _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
- struct mali_pp_job *job;
- struct mali_pp_job *tmp;
- mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
- MALI_IGNORE(arg);
-
- _mali_osk_spinlock_irq_lock(pp_scheduler_job_queue_lock);
-
- /*
- * Quickly "unhook" the jobs pending to be queued, so we can release the lock before
- * we start queueing the job objects (without any locks held)
- */
- _mali_osk_list_move_list(&pp_scheduler_job_queue_list, &list);
-
- _mali_osk_spinlock_irq_unlock(pp_scheduler_job_queue_lock);
-
- _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list, struct mali_pp_job, list) {
- _mali_osk_list_delinit(&job->list);
- schedule_mask |= mali_pp_scheduler_queue_job(job);
- }
-
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
-}
-
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-MALI_STATIC_INLINE mali_bool mali_pp_scheduler_has_virtual_group(void)
-{
-#if defined(CONFIG_MALI450)
- return NULL != virtual_group;
-#else
- return MALI_FALSE;
-#endif /* defined(CONFIG_MALI450) */
-}
-
-_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
-{
- _MALI_OSK_INIT_LIST_HEAD(&job_queue.normal_pri);
- _MALI_OSK_INIT_LIST_HEAD(&job_queue.high_pri);
- job_queue.depth = 0;
-
- _MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.normal_pri);
- _MALI_OSK_INIT_LIST_HEAD(&virtual_group_job_queue.high_pri);
- virtual_group_job_queue.depth = 0;
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- pp_scheduler_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#else
- pp_scheduler_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
- if (NULL == pp_scheduler_lock) goto cleanup;
-
- pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
- if (NULL == pp_scheduler_working_wait_queue) goto cleanup;
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
- if (NULL == pp_scheduler_wq_job_delete) goto cleanup;
-
- pp_scheduler_job_delete_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
- if (NULL == pp_scheduler_job_delete_lock) goto cleanup;
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
- pp_scheduler_wq_job_queue = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_queue, NULL);
- if (NULL == pp_scheduler_wq_job_queue) goto cleanup;
-
- pp_scheduler_job_queue_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
- if (NULL == pp_scheduler_job_queue_lock) goto cleanup;
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
- return _MALI_OSK_ERR_OK;
-
-cleanup:
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
- if (NULL != pp_scheduler_job_queue_lock) {
- _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
- pp_scheduler_job_queue_lock = NULL;
- }
-
- if (NULL != pp_scheduler_wq_job_queue) {
- _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
- pp_scheduler_wq_job_queue = NULL;
- }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- if (NULL != pp_scheduler_job_delete_lock) {
- _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
- pp_scheduler_job_delete_lock = NULL;
- }
-
- if (NULL != pp_scheduler_wq_job_delete) {
- _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
- pp_scheduler_wq_job_delete = NULL;
- }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
- if (NULL != pp_scheduler_working_wait_queue) {
- _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
- pp_scheduler_working_wait_queue = NULL;
- }
-
- if (NULL != pp_scheduler_lock) {
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- _mali_osk_spinlock_irq_term(pp_scheduler_lock);
-#else
- _mali_osk_spinlock_term(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
- pp_scheduler_lock = NULL;
- }
-
- return _MALI_OSK_ERR_NOMEM;
-}
-
-void mali_pp_scheduler_terminate(void)
-{
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
- _mali_osk_spinlock_irq_term(pp_scheduler_job_queue_lock);
- _mali_osk_wq_delete_work(pp_scheduler_wq_job_queue);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- _mali_osk_spinlock_irq_term(pp_scheduler_job_delete_lock);
- _mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
- _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- _mali_osk_spinlock_irq_term(pp_scheduler_lock);
-#else
- _mali_osk_spinlock_term(pp_scheduler_lock);
-#endif /* defined(MALI_UPPER_HALF_SCHEDULING) */
-}
-
-void mali_pp_scheduler_populate(void)
-{
- struct mali_group *group;
- struct mali_pp_core *pp_core;
- u32 num_groups;
- u32 i;
-
- num_groups = mali_group_get_glob_num_groups();
-
- /* Do we have a virtual group? */
- for (i = 0; i < num_groups; i++) {
- group = mali_group_get_glob_group(i);
-
- if (mali_group_is_virtual(group)) {
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Found virtual group %p.\n", group));
-
- virtual_group = group;
- break;
- }
- }
-
- /* Find all the available PP cores */
- for (i = 0; i < num_groups; i++) {
- group = mali_group_get_glob_group(i);
- pp_core = mali_group_get_pp_core(group);
-
- if (NULL != pp_core && !mali_group_is_virtual(group)) {
- if (0 == pp_version) {
- /* Retrieve PP version from the first available PP core */
- pp_version = mali_pp_core_get_version(pp_core);
- }
-
- if (mali_pp_scheduler_has_virtual_group()) {
- /* Add all physical PP cores to the virtual group */
- mali_group_lock(virtual_group);
- group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
- mali_group_add_group(virtual_group, group, MALI_TRUE);
- mali_group_unlock(virtual_group);
- } else {
- _mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
- }
-
- num_cores++;
- }
- }
-
- enabled_cores = num_cores;
-}
-
-void mali_pp_scheduler_depopulate(void)
-{
- struct mali_group *group, *temp;
-
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
-
- /* Delete all groups owned by scheduler */
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_delete(virtual_group);
- }
-
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
- mali_group_delete(group);
- }
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
- mali_group_delete(group);
- }
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_disable_empty_virtual(void)
-{
- MALI_ASSERT_GROUP_LOCKED(virtual_group);
-
- if (mali_group_virtual_disable_if_empty(virtual_group)) {
- MALI_DEBUG_PRINT(4, ("Disabling empty virtual group\n"));
-
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
- virtual_group_state = VIRTUAL_GROUP_DISABLED;
- }
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_enable_empty_virtual(void)
-{
- MALI_ASSERT_GROUP_LOCKED(virtual_group);
-
- if (mali_group_virtual_enable_if_empty(virtual_group)) {
- MALI_DEBUG_PRINT(4, ("Re-enabling empty virtual group\n"));
-
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_DISABLED == virtual_group_state);
-
- virtual_group_state = VIRTUAL_GROUP_IDLE;
- }
-}
-
-static struct mali_pp_job *mali_pp_scheduler_get_job(struct mali_pp_scheduler_job_queue *queue)
-{
- struct mali_pp_job *job = NULL;
-
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- MALI_DEBUG_ASSERT_POINTER(queue);
-
- /* Check if we have a normal priority job. */
- if (!_mali_osk_list_empty(&queue->normal_pri)) {
- MALI_DEBUG_ASSERT(queue->depth > 0);
- job = _MALI_OSK_LIST_ENTRY(queue->normal_pri.next, struct mali_pp_job, list);
- }
-
- /* Prefer normal priority job if it is in progress. */
- if (NULL != job && 0 < job->sub_jobs_started) {
- return job;
- }
-
- /* Check if we have a high priority job. */
- if (!_mali_osk_list_empty(&queue->high_pri)) {
- MALI_DEBUG_ASSERT(queue->depth > 0);
- job = _MALI_OSK_LIST_ENTRY(queue->high_pri.next, struct mali_pp_job, list);
- }
-
- return job;
-}
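A minimal model of the selection policy implemented above: a partially started normal-priority job is finished before a queued high-priority job is started, so one job's sub jobs are not interleaved with another's (the types here are illustrative, not driver API):

#include <stddef.h>

struct q_job { unsigned sub_jobs_started; };

static struct q_job *pick_job(struct q_job *normal_head, struct q_job *high_head)
{
	struct q_job *job = normal_head;	/* normal priority candidate first */

	if (NULL != job && 0 < job->sub_jobs_started)
		return job;			/* in progress: finish it first */

	return (NULL != high_head) ? high_head : job; /* else high priority wins */
}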
-
-/**
- * Returns a physical job if a physical job is ready to run
- */
-MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_physical_job(void)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- return mali_pp_scheduler_get_job(&job_queue);
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_dequeue_physical_job(struct mali_pp_job *job)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- MALI_DEBUG_ASSERT(job_queue.depth > 0);
-
- /* Remove job from queue */
- if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
- /* All sub jobs have been started: remove job from queue */
- _mali_osk_list_delinit(&job->list);
- _mali_osk_list_delinit(&job->session_fb_lookup_list);
- }
-
- --job_queue.depth;
-}
-
-/**
- * Returns a virtual job if a virtual job is ready to run
- */
-MALI_STATIC_INLINE struct mali_pp_job *mali_pp_scheduler_get_virtual_group_job(void)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- MALI_DEBUG_ASSERT_POINTER(virtual_group);
- return mali_pp_scheduler_get_job(&virtual_group_job_queue);
-}
-
-static void mali_pp_scheduler_dequeue_virtual_group_job(struct mali_pp_job *job)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);
-
- /* Remove job from queue */
- if (!mali_pp_job_has_unstarted_sub_jobs(job)) {
- _mali_osk_list_delinit(&job->list);
- _mali_osk_list_delinit(&job->session_fb_lookup_list);
- --virtual_group_job_queue.depth;
- }
-}
-
-MALI_STATIC_INLINE void mali_pp_scheduler_pick_virtual_group_job(struct mali_pp_job *job,
- u32 *first_subjob, u32 *last_subjob)
-{
- MALI_ASSERT_GROUP_LOCKED(virtual_group);
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
-
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
-
- MALI_DEBUG_ASSERT_POINTER(first_subjob);
- MALI_DEBUG_ASSERT_POINTER(last_subjob);
-
- MALI_DEBUG_ASSERT(virtual_group_job_queue.depth > 0);
-
- MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
-
- *first_subjob = *last_subjob =
- mali_pp_job_get_first_unstarted_sub_job(job);
-
- if (mali_pp_job_is_with_dlbu(job)) {
- MALI_DEBUG_ASSERT(1 == mali_pp_job_get_sub_job_count(job));
- mali_pp_job_mark_sub_job_started(job, 0);
- } else {
- struct mali_group *child, *temp;
- _MALI_OSK_LIST_FOREACHENTRY(child, temp,
- &virtual_group->group_list, struct mali_group, group_list) {
- if (mali_pp_job_has_unstarted_sub_jobs(job)) {
- *last_subjob = mali_pp_job_get_first_unstarted_sub_job(job);
- mali_pp_job_mark_sub_job_started(job, *last_subjob);
- } else {
- break;
- }
- }
- }
-
- /* Virtual group is now working. */
- virtual_group_state = VIRTUAL_GROUP_WORKING;
-
- mali_pp_scheduler_dequeue_virtual_group_job(job);
-}
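The picking loop above starts one sub job per child group, or exactly one sub job for a DLBU job, which the hardware splits across cores itself. A one-function sketch of that batching rule (an illustrative model, not driver API):

/* How many sub jobs a virtual group with `children` cores starts in one go. */
static unsigned pick_batch_size(unsigned unstarted, unsigned children, int is_dlbu)
{
	if (is_dlbu)
		return 1; /* the DLBU load-balances this single sub job in HW */

	return (unstarted < children) ? unstarted : children;
}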
-
-
-/**
- * Checks if the criteria are met for removing a physical core from the virtual group
- */
-MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
- MALI_DEBUG_ASSERT(mali_pp_scheduler_has_virtual_group());
- MALI_ASSERT_GROUP_LOCKED(virtual_group);
-	/*
-	 * The criteria for taking a physical group out of the virtual group are the following:
-	 * - The virtual group is idle
-	 * - There are currently no physical groups (neither idle nor working)
-	 * - There are physical jobs to be scheduled
-	 */
- return (VIRTUAL_GROUP_IDLE == virtual_group_state) &&
- _mali_osk_list_empty(&group_list_idle) &&
- _mali_osk_list_empty(&group_list_working) &&
- (NULL != mali_pp_scheduler_get_physical_job());
-}
-
-MALI_STATIC_INLINE struct mali_group *mali_pp_scheduler_acquire_physical_group(void)
-{
- MALI_ASSERT_PP_SCHEDULER_LOCKED();
-
- if (!_mali_osk_list_empty(&group_list_idle)) {
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from idle list.\n"));
- return _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
- } else if (mali_pp_scheduler_has_virtual_group()) {
- MALI_ASSERT_GROUP_LOCKED(virtual_group);
- if (mali_pp_scheduler_can_move_virtual_to_physical()) {
- struct mali_group *group;
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquiring physical group from virtual group.\n"));
- group = mali_group_acquire_group(virtual_group);
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_pp_scheduler_disable_empty_virtual();
- }
-
- return group;
- }
- }
-
- return NULL;
-}
-
-static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job, mali_bool deferred)
-{
- if (MALI_FALSE == mali_pp_job_use_no_notification(job)) {
- u32 i;
- u32 num_counters_to_copy;
- mali_bool success = mali_pp_job_was_success(job);
-
- _mali_uk_pp_job_finished_s *jobres = job->finished_notification->result_buffer;
- _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
- jobres->user_job_ptr = mali_pp_job_get_user_id(job);
- if (MALI_TRUE == success) {
- jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
- } else {
- jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
- }
-
- if (mali_pp_job_is_with_dlbu(job)) {
- num_counters_to_copy = num_cores; /* Number of physical cores available */
- } else {
- num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
- }
-
- for (i = 0; i < num_counters_to_copy; i++) {
- jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
- jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
- jobres->perf_counter_src0 = mali_pp_job_get_pp_counter_global_src0();
- jobres->perf_counter_src1 = mali_pp_job_get_pp_counter_global_src1();
- }
-
- mali_session_send_notification(mali_pp_job_get_session(job), job->finished_notification);
- job->finished_notification = NULL;
- }
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- if (MALI_TRUE == deferred) {
- /* The deletion of the job object (releasing sync refs etc) must be done in a different context */
- mali_pp_scheduler_deferred_job_delete(job);
- } else {
- mali_pp_job_delete(job);
- }
-#else
- MALI_DEBUG_ASSERT(MALI_FALSE == deferred); /* no use cases need this in this configuration */
- mali_pp_job_delete(job);
-#endif
-}
-
-static void mali_pp_scheduler_finalize_job(struct mali_pp_job *job, mali_bool job_started)
-{
- /* This job object should not be on any lists. */
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
- /* Send notification back to user space */
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- mali_pp_scheduler_return_job_to_user(job, MALI_TRUE);
-#else
- mali_pp_scheduler_return_job_to_user(job, MALI_FALSE);
-#endif
-
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- if (_MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE & job->uargs.flags) {
- _mali_osk_atomic_inc(&job->session->number_of_window_jobs);
- }
-#endif
-
- mali_pp_scheduler_job_completed(job_started);
-}
-
-void mali_pp_scheduler_schedule(void)
-{
- struct mali_group *physical_groups_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
- struct mali_pp_job *physical_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
- u32 physical_sub_jobs_to_start[MALI_MAX_NUMBER_OF_PP_GROUPS - 1];
- int num_physical_jobs_to_start = 0;
- int i;
-
- if (mali_pp_scheduler_has_virtual_group()) {
- /* Lock the virtual group since we might have to grab physical groups. */
- mali_group_lock(virtual_group);
- }
-
- mali_pp_scheduler_lock();
- if (pause_count > 0) {
- /* Scheduler is suspended, don't schedule any jobs. */
- mali_pp_scheduler_unlock();
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_unlock(virtual_group);
- }
- return;
- }
-
- /* Find physical job(s) to schedule first. */
- while (1) {
- struct mali_group *group;
- struct mali_pp_job *job;
- u32 sub_job;
-
- job = mali_pp_scheduler_get_physical_job();
- if (NULL == job) {
- break; /* No job, early out. */
- }
-
- if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) &&
- mali_pp_job_is_large_and_unstarted(job) && !_mali_osk_list_empty(&group_list_working)) {
- /* Since not all groups are idle, don't schedule yet. */
- break;
- }
-
- MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
- MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
- MALI_DEBUG_ASSERT(1 <= mali_pp_job_get_sub_job_count(job));
-
-		/* Acquire a physical group, either from the idle list or from the virtual group.
-		 * In case the group was acquired from the virtual group, its state will be
-		 * LEAVING_VIRTUAL and must be set to IDLE before it can be used. */
- group = mali_pp_scheduler_acquire_physical_group();
- if (NULL == group) {
- /* Could not get a group to run the job on, early out. */
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: No more physical groups available.\n"));
- break;
- }
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Acquired physical group %p.\n", group));
-
- /* Mark sub job as started. */
- sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
- mali_pp_job_mark_sub_job_started(job, sub_job);
-
- /* Remove job from queue (if this was the last sub job). */
- mali_pp_scheduler_dequeue_physical_job(job);
-
- /* Move group to working list. */
- _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_working);
-
- /* Keep track of this group, so that we actually can start the job once we are done with the scheduler lock we are now holding. */
- physical_groups_to_start[num_physical_jobs_to_start] = group;
- physical_jobs_to_start[num_physical_jobs_to_start] = job;
- physical_sub_jobs_to_start[num_physical_jobs_to_start] = sub_job;
- ++num_physical_jobs_to_start;
-
- MALI_DEBUG_ASSERT(num_physical_jobs_to_start < MALI_MAX_NUMBER_OF_PP_GROUPS);
- }
-
- if (mali_pp_scheduler_has_virtual_group()) {
- if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
- /* We have a virtual group and it is idle. */
-
- struct mali_pp_job *job;
-
- /* Find a virtual job we can start. */
- job = mali_pp_scheduler_get_virtual_group_job();
-
- if (NULL != job) {
- u32 first_subjob, last_subjob;
-
-				/* Mark the sub jobs that the virtual group will run in one
-				 * go as started, and remove the job from the queue once all
-				 * of its sub jobs have been started. */
- mali_pp_scheduler_pick_virtual_group_job(job, &first_subjob, &last_subjob);
-
- /* We no longer need the scheduler lock, but we still need the virtual lock
- * in order to start the virtual job.
- */
- mali_pp_scheduler_unlock();
-
- /* Start job. */
- mali_group_start_job_on_virtual(virtual_group, job, first_subjob, last_subjob);
- } else {
- /* No virtual job to start. */
- mali_pp_scheduler_unlock();
- }
- } else {
- /* We have a virtual group, but it is busy or disabled. */
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE != virtual_group_state);
-
- mali_pp_scheduler_unlock();
- }
- mali_group_unlock(virtual_group);
- } else {
- /* There is no virtual group. */
- mali_pp_scheduler_unlock();
- }
-
- /* We have now released the scheduler lock, and we are ready to start the physical jobs.
- * The reason we want to wait until we have released the scheduler lock is that job start
- * may take quite a bit of time (many registers have to be written). This will allow new
- * jobs from user space to come in, and post-processing of other PP jobs to happen at the
- * same time as we start jobs. */
- for (i = 0; i < num_physical_jobs_to_start; i++) {
- struct mali_group *group = physical_groups_to_start[i];
- struct mali_pp_job *job = physical_jobs_to_start[i];
- u32 sub_job = physical_sub_jobs_to_start[i];
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
- MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual_group_job(job));
-
- mali_group_lock(group);
-
- /* Set state to IDLE if group was acquired from the virtual group. */
- group->state = MALI_GROUP_STATE_IDLE;
-
- mali_group_start_job_on_group(group, job, sub_job);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from schedule).\n",
- mali_pp_job_get_id(job), job, sub_job + 1,
- mali_pp_job_get_sub_job_count(job)));
-
- mali_group_unlock(group);
- }
-}
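mali_pp_scheduler_schedule() claims work under the scheduler lock but programs the hardware only after dropping it, as the closing comment explains. A compilable sketch of that two-phase shape (all helpers here are hypothetical stubs so the sketch builds, not driver functions):

#include <pthread.h>

struct start_req { int group_id; unsigned sub_job; };

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static int claim_next(struct start_req *out) { (void)out; return 0; } /* stub */
static void start_on_hw(const struct start_req *r) { (void)r; }       /* stub */

static void schedule_two_phase(void)
{
	struct start_req starts[8];
	int n = 0;
	int i;

	pthread_mutex_lock(&sched_lock);
	while (n < 8 && claim_next(&starts[n]))
		n++;				/* cheap bookkeeping under lock */
	pthread_mutex_unlock(&sched_lock);

	for (i = 0; i < n; i++)
		start_on_hw(&starts[i]);	/* slow register writes, lock-free */
}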
-
-/**
- * Set group idle.
- *
- * If @ref group is the virtual group, nothing is done since the virtual group should be idle
- * already.
- *
- * If @ref group is a physical group we rejoin the virtual group, if it exists. If not, we move the
- * physical group to the idle list.
- *
- * @note The group and the scheduler must both be locked when entering this function. Both will be
- * unlocked before exiting.
- *
- * @param group The group to set idle.
- */
-static void mali_pp_scheduler_set_group_idle_and_unlock(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
- if (mali_group_is_virtual(group)) {
- /* The virtual group should have been set to non-working already. */
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
-
- return;
- } else {
- if (mali_pp_scheduler_has_virtual_group()) {
- /* Rejoin virtual group. */
-
- /* We're no longer needed on the scheduler list. */
- _mali_osk_list_delinit(&(group->pp_scheduler_list));
-
- /* Make sure no interrupts are handled for this group during the transition
- * from physical to virtual. */
- group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
-
- mali_group_lock(virtual_group);
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_pp_scheduler_enable_empty_virtual();
- }
-
- /* We need to recheck the group state since it is possible that someone has
- * modified the group before we locked the virtual group. */
- if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
- mali_group_add_group(virtual_group, group, MALI_TRUE);
- }
-
- mali_group_unlock(virtual_group);
- } else {
- /* Move physical group back to idle list. */
- _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
-#endif
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
- }
- }
-}
-
-/**
- * Schedule job on locked group.
- *
- * @note The group and the scheduler must both be locked when entering this function. Both will be
- * unlocked before exiting.
- *
- * @param group The group to schedule on.
- */
-static void mali_pp_scheduler_schedule_on_group_and_unlock(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
- if (mali_group_is_virtual(group)) {
- /* Now that the virtual group is idle, check if we should reconfigure. */
-
- struct mali_pp_job *virtual_job = NULL;
- struct mali_pp_job *physical_job = NULL;
- struct mali_group *physical_group = NULL;
- u32 physical_sub_job = 0;
-
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_IDLE == virtual_group_state);
-
- if (mali_pp_scheduler_can_move_virtual_to_physical()) {
- /* There is a runnable physical job and we can acquire a physical group. */
- physical_job = mali_pp_scheduler_get_physical_job();
- MALI_DEBUG_ASSERT_POINTER(physical_job);
- MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(physical_job));
-
- /* Mark sub job as started. */
- physical_sub_job = mali_pp_job_get_first_unstarted_sub_job(physical_job);
- mali_pp_job_mark_sub_job_started(physical_job, physical_sub_job);
-
- /* Remove job from queue (if this was the last sub job). */
- mali_pp_scheduler_dequeue_physical_job(physical_job);
-
- /* Acquire a physical group from the virtual group. Its state will
- * be LEAVING_VIRTUAL and must be set to IDLE before it can be
- * used. */
- physical_group = mali_group_acquire_group(virtual_group);
-
- /* Move physical group to the working list, as we will soon start a job on it. */
- _mali_osk_list_move(&(physical_group->pp_scheduler_list), &group_list_working);
-
- mali_pp_scheduler_disable_empty_virtual();
- }
-
- /* Get next virtual job. */
- virtual_job = mali_pp_scheduler_get_virtual_group_job();
- if (NULL != virtual_job && VIRTUAL_GROUP_IDLE == virtual_group_state) {
- u32 first_subjob, last_subjob;
-			/* Mark the sub jobs that the virtual group will run in one go as
-			 * started, and remove the job from the queue once all of its sub
-			 * jobs have been started. */
- mali_pp_scheduler_pick_virtual_group_job(virtual_job, &first_subjob, &last_subjob);
-
- /* We no longer need the scheduler lock, but we still need the virtual lock
- * in order to start the virtual job.
- */
- mali_pp_scheduler_unlock();
-
- /* Start job. */
- mali_group_start_job_on_virtual(group, virtual_job, first_subjob, last_subjob);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Virtual job %u (0x%08X) part %u/%u started (from job_done).\n",
- mali_pp_job_get_id(virtual_job), virtual_job, 1,
- mali_pp_job_get_sub_job_count(virtual_job)));
- } else {
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch("Mali_Virtual_PP", sched_clock(), 0, 0, 0);
-#endif
-
- mali_pp_scheduler_unlock();
- }
-
- /* Releasing the virtual group lock that was held when entering the function. */
- mali_group_unlock(group);
-
- /* Start a physical job (if we acquired a physical group earlier). */
- if (NULL != physical_job && NULL != physical_group) {
- mali_group_lock(physical_group);
-
- /* Change the group state from LEAVING_VIRTUAL to IDLE to complete the transition. */
- physical_group->state = MALI_GROUP_STATE_IDLE;
-
- /* Start job. */
- mali_group_start_job_on_group(physical_group, physical_job, physical_sub_job);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
- mali_pp_job_get_id(physical_job), physical_job, physical_sub_job + 1,
- mali_pp_job_get_sub_job_count(physical_job)));
-
- mali_group_unlock(physical_group);
- }
- } else {
- /* Physical group. */
- struct mali_pp_job *job = NULL;
- u32 sub_job = 0;
-
- job = mali_pp_scheduler_get_physical_job();
- if (NULL != job) {
- /* There is a runnable physical job. */
- MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job));
-
- /* Mark sub job as started. */
- sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
- mali_pp_job_mark_sub_job_started(job, sub_job);
-
- /* Remove job from queue (if this was the last sub job). */
- mali_pp_scheduler_dequeue_physical_job(job);
-
- mali_pp_scheduler_unlock();
-
- /* Group is already on the working list, so start the new job. */
- mali_group_start_job_on_group(group, job, sub_job);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Physical job %u (0x%08X) part %u/%u started (from job_done).\n",
- mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
-
- mali_group_unlock(group);
- } else {
- mali_pp_scheduler_set_group_idle_and_unlock(group);
- }
- }
-}
-
-void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half)
-{
- mali_bool job_is_done = MALI_FALSE;
- mali_bool schedule_on_group = MALI_FALSE;
- mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) part %u/%u completed (%s).\n",
- mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
- mali_pp_job_get_id(job),
- job, sub_job + 1,
- mali_pp_job_get_sub_job_count(job),
- success ? "success" : "failure"));
-
- MALI_ASSERT_GROUP_LOCKED(group);
- mali_pp_scheduler_lock();
-
- if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
- u32 subjobs;
-
- /* Get how many subjobs are running parallel in this virtual group */
- subjobs = mali_pp_job_get_first_unstarted_sub_job(job) - group->pp_running_sub_job;
- MALI_DEBUG_ASSERT(subjobs > 0);
-
- for (; 0 < subjobs; subjobs--) {
- mali_pp_job_mark_sub_job_completed(job, success);
- }
-
- mali_group_non_dlbu_job_done_virtual(group);
- } else {
- mali_pp_job_mark_sub_job_completed(job, success);
- }
-
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job) == mali_group_is_virtual(group));
-
- job_is_done = mali_pp_job_is_complete(job);
-
- if (job_is_done) {
-		/* Groups added to the virtual group may have picked up sub jobs to run
-		 * without dequeuing the job, so do the necessary dequeue here, under
-		 * the scheduler lock. */
- if (mali_group_is_virtual(group) && !mali_pp_job_is_with_dlbu(job)) {
- if (!_mali_osk_list_empty(&job->list)) {
- mali_pp_scheduler_dequeue_virtual_group_job(job);
- }
- }
-
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
-
- /* Remove job from session list. */
- _mali_osk_list_delinit(&job->session_list);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for %s job %u (0x%08X).\n",
- mali_pp_job_is_virtual_group_job(job) ? "virtual group" : "physical",
- mali_pp_job_get_id(job), job));
-
- mali_pp_scheduler_unlock();
-
- /* Release tracker. If other trackers are waiting on this tracker, this could
- * trigger activation. The returned scheduling mask can be used to determine if we
- * have to schedule GP, PP or both. */
- schedule_mask = mali_timeline_tracker_release(&job->tracker);
-
- mali_pp_scheduler_lock();
- }
-
- if (mali_group_is_virtual(group)) {
- /* Obey the policy. */
- virtual_group_state = VIRTUAL_GROUP_IDLE;
- }
-
- /* If paused, then this was the last job, so wake up sleeping workers and return. */
- if (pause_count > 0) {
- /* Wake up sleeping workers. Their wake-up condition is that
- * num_slots == num_slots_idle, so unless we are done working, no
- * threads will actually be woken up.
- */
- if (!mali_group_is_virtual(group)) {
- /* Move physical group to idle list. */
- _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
- }
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), 0, 0, 0);
-#endif
-
- _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
-
- if (job_is_done) {
- /* Return job to user and delete it. */
- mali_pp_scheduler_finalize_job(job, MALI_TRUE);
- }
-
- /* A GP job might be queued by tracker release above,
- * make sure GP scheduler has a chance to schedule this (if possible)
- */
- mali_scheduler_schedule_from_mask(schedule_mask & ~MALI_SCHEDULER_MASK_PP, in_upper_half);
-
- return;
- }
-
- /* Since this group just finished running a job, we can reschedule a new job on it
- * immediately. */
-
- /* By default, don't schedule on group. */
- schedule_on_group = MALI_FALSE;
-
- if (mali_group_is_virtual(group)) {
- /* Always schedule immediately on virtual group. */
- schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
- schedule_on_group = MALI_TRUE;
- } else if (0 < job_queue.depth && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP) || _mali_osk_list_empty(&group_list_idle))) {
- struct mali_pp_job *next_job = NULL;
-
- next_job = mali_pp_scheduler_get_physical_job();
- MALI_DEBUG_ASSERT_POINTER(next_job);
-
- /* If no new jobs have been queued or if this group is the only idle group, we can
- * schedule immediately on this group, unless we are GP bound and the next job would
- * benefit from all its sub jobs being started concurrently. */
-
- if (mali_scheduler_hint_is_enabled(MALI_SCHEDULER_HINT_GP_BOUND) && mali_pp_job_is_large_and_unstarted(next_job)) {
- /* We are GP bound and the job would benefit from all sub jobs being started
- * concurrently. Postpone scheduling until after group has been unlocked. */
- schedule_mask |= MALI_SCHEDULER_MASK_PP;
- schedule_on_group = MALI_FALSE;
- } else {
- /* Schedule job immediately since we are not GP bound. */
- schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
- schedule_on_group = MALI_TRUE;
- }
- } else if ((0 < virtual_group_job_queue.depth) && (!mali_scheduler_mask_is_set(schedule_mask, MALI_SCHEDULER_MASK_PP))) {
-		/* This case is rare and only occurs when a virtual job has sub jobs.
-		 * The last "pilot job" here might not be a real pilot job; it may be a
-		 * regular PP job with only one sub job (i.e. only one region), so its
-		 * completion may not trigger any virtual job queued in the virtual
-		 * queue. That could stall the PP scheduler even though jobs are already
-		 * waiting in the virtual queue, so in this case we need to set the
-		 * schedule_mask explicitly. */
- schedule_mask |= MALI_SCHEDULER_MASK_PP;
- }
-
- if (schedule_on_group) {
- /* Schedule a new job on this group. */
- mali_pp_scheduler_schedule_on_group_and_unlock(group);
- } else {
- /* Set group idle. Will rejoin virtual group, under appropriate conditions. */
- mali_pp_scheduler_set_group_idle_and_unlock(group);
- }
-
- if (!schedule_on_group || MALI_SCHEDULER_MASK_EMPTY != schedule_mask) {
- if (MALI_SCHEDULER_MASK_PP & schedule_mask) {
- /* Schedule PP directly. */
- mali_pp_scheduler_schedule();
- schedule_mask &= ~MALI_SCHEDULER_MASK_PP;
- }
-
- /* Schedule other jobs that were activated. */
- mali_scheduler_schedule_from_mask(schedule_mask, in_upper_half);
- }
-
- if (job_is_done) {
- /* Return job to user and delete it. */
- mali_pp_scheduler_finalize_job(job, MALI_TRUE);
- }
-}
-
-void mali_pp_scheduler_suspend(void)
-{
- mali_pp_scheduler_lock();
- pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
- mali_pp_scheduler_unlock();
-
-	/* Go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
-	 * mali_pp_scheduler_is_suspended() condition is re-evaluated. It returns
-	 * true only when pause_count > 0 and all cores are idle, so while a core
-	 * is still active we keep waiting.
-	 */
- _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended, NULL);
-}
-
-void mali_pp_scheduler_resume(void)
-{
- mali_pp_scheduler_lock();
- pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
- mali_pp_scheduler_unlock();
- if (0 == pause_count) {
- mali_pp_scheduler_schedule();
- }
-}
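A compilable model of the suspend/resume gate above, with a mutex and condition variable standing in for the OSK wait queue; mali_pp_scheduler_disable_group() below shows the real pairing in use:

#include <pthread.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idle_cv = PTHREAD_COND_INITIALIZER;
static int pause_count_model, jobs_in_flight;

static void scheduler_suspend_model(void)
{
	pthread_mutex_lock(&gate);
	pause_count_model++;			/* stop scheduling new jobs */
	while (0 < jobs_in_flight)
		pthread_cond_wait(&idle_cv, &gate); /* drain running jobs */
	pthread_mutex_unlock(&gate);
}

static void scheduler_resume_model(void)
{
	pthread_mutex_lock(&gate);
	--pause_count_model;			/* at zero, scheduling restarts */
	pthread_mutex_unlock(&gate);
}

static void job_done_model(void)
{
	pthread_mutex_lock(&gate);
	if (0 == --jobs_in_flight)
		pthread_cond_broadcast(&idle_cv); /* wake any suspender */
	pthread_mutex_unlock(&gate);
}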
-
-static mali_timeline_point mali_pp_scheduler_submit_job(struct mali_session_data *session, struct mali_pp_job *job)
-{
- mali_timeline_point point;
- u32 fb_lookup_id = 0;
-
- MALI_DEBUG_ASSERT_POINTER(session);
- MALI_DEBUG_ASSERT_POINTER(job);
-
- mali_pp_scheduler_lock();
-
- fb_lookup_id = mali_pp_job_get_fb_lookup_id(job);
- MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
-
- /* Adding job to the lookup list used to quickly discard writeback units of queued jobs. */
- _mali_osk_list_addtail(&job->session_fb_lookup_list, &session->pp_job_fb_lookup_list[fb_lookup_id]);
-
- mali_pp_scheduler_unlock();
-
- /* We hold a PM reference for every job we hold queued (and running) */
- _mali_osk_pm_dev_ref_add();
-
- /* Add job to Timeline system. */
- point = mali_timeline_system_add_tracker(session->timeline_system, &job->tracker, MALI_TIMELINE_PP);
-
- return point;
-}
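The PM reference taken per submitted job above is balanced when the job completes, so the device can power down exactly when nothing is queued or running. A two-function model of that invariant (names are illustrative):

static unsigned pm_refs; /* one reference per queued or running PP job */

static void on_job_submit(void)   { pm_refs++; }
static void on_job_complete(void) { if (0 == --pm_refs) { /* GPU may power down */ } }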
-
-_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs)
-{
- struct mali_session_data *session;
- struct mali_pp_job *job;
- mali_timeline_point point;
- u32 __user *timeline_point_ptr = NULL;
-
- MALI_DEBUG_ASSERT_POINTER(uargs);
- MALI_DEBUG_ASSERT_POINTER(ctx);
-
- session = (struct mali_session_data *)ctx;
-
- job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
- if (NULL == job) {
- MALI_PRINT_ERROR(("Failed to create PP job.\n"));
- return _MALI_OSK_ERR_NOMEM;
- }
-
- timeline_point_ptr = (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
-
- point = mali_pp_scheduler_submit_job(session, job);
- job = NULL;
-
- if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
- /* Let user space know that something failed after the job was started. */
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
- }
-
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs)
-{
- struct mali_session_data *session;
- _mali_uk_pp_and_gp_start_job_s kargs;
- struct mali_pp_job *pp_job;
- struct mali_gp_job *gp_job;
- u32 __user *timeline_point_ptr = NULL;
- mali_timeline_point point;
- _mali_uk_pp_start_job_s __user *pp_args;
- _mali_uk_gp_start_job_s __user *gp_args;
-
- MALI_DEBUG_ASSERT_POINTER(ctx);
- MALI_DEBUG_ASSERT_POINTER(uargs);
-
- session = (struct mali_session_data *) ctx;
-
- if (0 != _mali_osk_copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_and_gp_start_job_s))) {
- return _MALI_OSK_ERR_NOMEM;
- }
-
- pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
- gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
-
- pp_job = mali_pp_job_create(session, pp_args, mali_scheduler_get_new_id());
- if (NULL == pp_job) {
- MALI_PRINT_ERROR(("Failed to create PP job.\n"));
- return _MALI_OSK_ERR_NOMEM;
- }
-
- gp_job = mali_gp_job_create(session, gp_args, mali_scheduler_get_new_id(), mali_pp_job_get_tracker(pp_job));
- if (NULL == gp_job) {
- MALI_PRINT_ERROR(("Failed to create GP job.\n"));
- mali_pp_job_delete(pp_job);
- return _MALI_OSK_ERR_NOMEM;
- }
-
- timeline_point_ptr = (u32 __user *)(uintptr_t)pp_job->uargs.timeline_point_ptr;
-
- /* Submit GP job. */
- mali_gp_scheduler_submit_job(session, gp_job);
- gp_job = NULL;
-
- /* Submit PP job. */
- point = mali_pp_scheduler_submit_job(session, pp_job);
- pp_job = NULL;
-
- if (0 != _mali_osk_put_user(((u32) point), timeline_point_ptr)) {
- /* Let user space know that something failed after the jobs were started. */
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
- }
-
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
-{
- MALI_DEBUG_ASSERT_POINTER(args);
-
- args->number_of_total_cores = num_cores;
- args->number_of_enabled_cores = enabled_cores;
-
- return _MALI_OSK_ERR_OK;
-}
-
-u32 mali_pp_scheduler_get_num_cores_total(void)
-{
- return num_cores;
-}
-
-u32 mali_pp_scheduler_get_num_cores_enabled(void)
-{
- return enabled_cores;
-}
-
-_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
-{
- MALI_DEBUG_ASSERT_POINTER(args);
-
- args->version = pp_version;
-
- return _MALI_OSK_ERR_OK;
-}
-
-void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
-{
- struct mali_session_data *session;
- struct mali_pp_job *job;
- struct mali_pp_job *tmp;
- u32 fb_lookup_id;
-
- session = (struct mali_session_data *)(uintptr_t)args->ctx;
-
- MALI_DEBUG_ASSERT_POINTER(session);
- MALI_DEBUG_ASSERT_POINTER(args);
-
- fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
-
- mali_pp_scheduler_lock();
-
- /* Iterate over all jobs for given frame builder_id. */
- _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &session->pp_job_fb_lookup_list[fb_lookup_id], struct mali_pp_job, session_fb_lookup_list) {
- MALI_DEBUG_CODE(u32 disable_mask = 0);
-
- if (mali_pp_job_get_frame_builder_id(job) == (u32) args->fb_id) {
- MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
- if (args->wb0_memory == job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
- MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
- mali_pp_job_disable_wb0(job);
- }
- if (args->wb1_memory == job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
- MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
- mali_pp_job_disable_wb1(job);
- }
- if (args->wb2_memory == job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]) {
- MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
- mali_pp_job_disable_wb2(job);
- }
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", disable_mask));
- } else {
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
- }
- }
-
- mali_pp_scheduler_unlock();
-}
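The writeback-disable path above only walks one bucket: jobs are hashed by the low bits of their frame builder id when submitted (see mali_pp_scheduler_submit_job). A sketch of the bucketing, with an illustrative table size rather than the driver's MALI_PP_JOB_FB_LOOKUP_LIST_SIZE:

#define FB_LOOKUP_SIZE 16u
#define FB_LOOKUP_MASK (FB_LOOKUP_SIZE - 1u)

static unsigned fb_lookup_bucket(unsigned fb_id)
{
	return fb_id & FB_LOOKUP_MASK; /* equal fb_ids always share a bucket */
}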
-
-void mali_pp_scheduler_abort_session(struct mali_session_data *session)
-{
- u32 i = 0;
- struct mali_pp_job *job, *tmp_job;
- struct mali_group *group, *tmp_group;
- struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
- _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs);
-
- MALI_DEBUG_ASSERT_POINTER(session);
- MALI_DEBUG_ASSERT(session->is_aborting);
-
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08X.\n", session));
-
- mali_pp_scheduler_lock();
-
- /* Find all jobs from the aborting session. */
- _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &session->pp_job_list, struct mali_pp_job, session_list) {
- /* Remove job from queue. */
- if (mali_pp_job_is_virtual_group_job(job)) {
- if (mali_pp_job_has_unstarted_sub_jobs(job))
- --virtual_group_job_queue.depth;
- } else {
- job_queue.depth -= mali_pp_job_get_sub_job_count(job) - mali_pp_job_get_first_unstarted_sub_job(job);
- }
-
- _mali_osk_list_delinit(&job->list);
- _mali_osk_list_delinit(&job->session_fb_lookup_list);
-
- mali_pp_job_mark_unstarted_failed(job);
-
- if (mali_pp_job_is_complete(job)) {
- /* Job is complete, remove from session list. */
- _mali_osk_list_delinit(&job->session_list);
-
- /* Move job to local list for release and deletion. */
- _mali_osk_list_add(&job->list, &removed_jobs);
-
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborted PP job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
- } else {
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Keeping partially started PP job %u (0x%08X) in session.\n", mali_pp_job_get_id(job), job));
- }
- }
-
- _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, struct mali_group, pp_scheduler_list) {
- groups[i++] = group;
- }
-
- _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, pp_scheduler_list) {
- groups[i++] = group;
- }
-
- mali_pp_scheduler_unlock();
-
- /* Release and delete all found jobs from the aborting session. */
- _MALI_OSK_LIST_FOREACHENTRY(job, tmp_job, &removed_jobs, struct mali_pp_job, list) {
- mali_timeline_tracker_release(&job->tracker);
- mali_pp_job_delete(job);
- mali_pp_scheduler_job_completed(MALI_TRUE);
- }
-
- /* Abort any running jobs from the session. */
- while (i > 0) {
- mali_group_abort_session(groups[--i], session);
- }
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_abort_session(virtual_group, session);
- }
-}
-
-static mali_bool mali_pp_scheduler_is_suspended(void *data)
-{
- mali_bool ret;
-
- /* This callback does not use the data pointer. */
- MALI_IGNORE(data);
-
- mali_pp_scheduler_lock();
-
- ret = pause_count > 0
- && _mali_osk_list_empty(&group_list_working)
- && VIRTUAL_GROUP_WORKING != virtual_group_state;
-
- mali_pp_scheduler_unlock();
-
- return ret;
-}
-
-struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void)
-{
- if (mali_pp_scheduler_has_virtual_group()) {
- return mali_group_get_pp_core(virtual_group);
- } else {
- return NULL;
- }
-}
-
-#if MALI_STATE_TRACKING
-u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
-{
- int n = 0;
- struct mali_group *group;
- struct mali_group *temp;
-
- n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
- n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue.normal_pri) ? "empty" : "not empty");
- n += _mali_osk_snprintf(buf + n, size - n, "\tHigh priority queue is %s\n", _mali_osk_list_empty(&job_queue.high_pri) ? "empty" : "not empty");
- n += _mali_osk_snprintf(buf + n, size - n, "\n");
-
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
- n += mali_group_dump_state(group, buf + n, size - n);
- }
-
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
- n += mali_group_dump_state(group, buf + n, size - n);
- }
-
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, pp_scheduler_list) {
- n += mali_group_dump_state(group, buf + n, size - n);
- }
-
- if (mali_pp_scheduler_has_virtual_group()) {
- n += mali_group_dump_state(virtual_group, buf + n, size - n);
- }
-
- n += _mali_osk_snprintf(buf + n, size - n, "\n");
- return n;
-}
-#endif
-
-/* This function is intended for power on reset of all cores.
- * No locking is done for the list iteration, which can only be safe if the
- * scheduler is paused and all cores idle. That is always the case on init and
- * power on. */
-void mali_pp_scheduler_reset_all_groups(void)
-{
- struct mali_group *group, *temp;
- struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
- s32 i = 0;
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_lock(virtual_group);
- mali_group_reset(virtual_group);
- mali_group_unlock(virtual_group);
- }
-
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
- mali_pp_scheduler_lock();
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, pp_scheduler_list) {
- groups[i++] = group;
- }
- mali_pp_scheduler_unlock();
-
- while (i > 0) {
- group = groups[--i];
-
- mali_group_lock(group);
- mali_group_reset(group);
- mali_group_unlock(group);
- }
-}
-
-void mali_pp_scheduler_zap_all_active(struct mali_session_data *session)
-{
- struct mali_group *group, *temp;
- struct mali_group *groups[MALI_MAX_NUMBER_OF_GROUPS];
- s32 i = 0;
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_zap_session(virtual_group, session);
- }
-
- mali_pp_scheduler_lock();
- _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, pp_scheduler_list) {
- groups[i++] = group;
- }
- mali_pp_scheduler_unlock();
-
- while (i > 0) {
- mali_group_zap_session(groups[--i], session);
- }
-}
-
-/* A pm reference must be taken with _mali_osk_pm_dev_ref_add_no_power_on
- * before calling this function to avoid Mali powering down as HW is accessed.
- */
-static void mali_pp_scheduler_enable_group_internal(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_group_lock(group);
-
- if (MALI_GROUP_STATE_DISABLED != group->state) {
- mali_group_unlock(group);
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already enabled.\n", group));
- return;
- }
-
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Enabling PP group %p.\n", group));
-
- mali_pp_scheduler_lock();
-
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
- ++enabled_cores;
-
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_bool update_hw;
-
- /* Add group to virtual group. */
- _mali_osk_list_delinit(&(group->pp_scheduler_list));
- group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
-
- mali_group_lock(virtual_group);
-
- update_hw = mali_pm_is_power_on();
- /* Get ref of group domain */
- mali_group_get_pm_domain_ref(group);
-
- MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
- MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
-
- if (update_hw) {
- mali_group_lock(group);
- mali_group_power_on_group(group);
- mali_group_reset(group);
- mali_group_unlock(group);
- }
-
- mali_pp_scheduler_enable_empty_virtual();
- mali_group_add_group(virtual_group, group, update_hw);
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Added to virtual group.\n", group));
-
- mali_group_unlock(virtual_group);
- } else {
- /* Get ref of group domain */
- mali_group_get_pm_domain_ref(group);
-
- MALI_DEBUG_ASSERT(NULL == group->pm_domain ||
- MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(group->pm_domain));
-
- /* Put group on idle list. */
- if (mali_pm_is_power_on()) {
- mali_group_power_on_group(group);
- mali_group_reset(group);
- }
-
- _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_idle);
- group->state = MALI_GROUP_STATE_IDLE;
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Done enabling group %p. Now on idle list.\n", group));
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
- }
-}
-
-void mali_pp_scheduler_enable_group(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- _mali_osk_pm_dev_ref_add_no_power_on();
-
- mali_pp_scheduler_enable_group_internal(group);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- /* Pick up any jobs that might have been queued if all PP groups were disabled. */
- mali_pp_scheduler_schedule();
-}
-
-static void mali_pp_scheduler_disable_group_internal(struct mali_group *group)
-{
- if (mali_pp_scheduler_has_virtual_group()) {
- mali_group_lock(virtual_group);
-
- MALI_DEBUG_ASSERT(VIRTUAL_GROUP_WORKING != virtual_group_state);
- if (MALI_GROUP_STATE_JOINING_VIRTUAL == group->state) {
- /* The group was in the process of being added to the virtual group. We
- * only need to change the state to reverse this. */
- group->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
- } else if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
- /* Remove group from virtual group. The state of the group will be
- * LEAVING_VIRTUAL and the group will not be on any scheduler list. */
- mali_group_remove_group(virtual_group, group);
-
- mali_pp_scheduler_disable_empty_virtual();
- }
-
- mali_group_unlock(virtual_group);
- }
-
- mali_group_lock(group);
- mali_pp_scheduler_lock();
-
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
- || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
- || MALI_GROUP_STATE_DISABLED == group->state);
-
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: PP group %p already disabled.\n", group));
- } else {
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disabling PP group %p.\n", group));
-
- --enabled_cores;
- _mali_osk_list_move(&(group->pp_scheduler_list), &group_list_disabled);
- group->state = MALI_GROUP_STATE_DISABLED;
-
- mali_group_power_off_group(group, MALI_TRUE);
- mali_group_put_pm_domain_ref(group);
- }
-
- mali_pp_scheduler_unlock();
- mali_group_unlock(group);
-}
-
-void mali_pp_scheduler_disable_group(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_pp_scheduler_suspend();
-
- _mali_osk_pm_dev_ref_add_no_power_on();
-
- mali_pp_scheduler_disable_group_internal(group);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- mali_pp_scheduler_resume();
-}
-
-static void mali_pp_scheduler_notify_core_change(u32 num_cores)
-{
- mali_bool done = MALI_FALSE;
-
- if (mali_is_mali450()) {
- return;
- }
-
- /*
- * This function gets a bit complicated because we can't hold the session lock while
- * allocating notification objects.
- */
-
- while (!done) {
- u32 i;
- u32 num_sessions_alloc;
- u32 num_sessions_with_lock;
- u32 used_notification_objects = 0;
- _mali_osk_notification_t **notobjs;
-
-		/* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
- num_sessions_alloc = mali_session_get_count();
- if (0 == num_sessions_alloc) {
- /* No sessions to report to */
- return;
- }
-
- notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
- if (NULL == notobjs) {
- MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
- /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
- return;
- }
-
- for (i = 0; i < num_sessions_alloc; i++) {
- notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
- if (NULL != notobjs[i]) {
- _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
- data->number_of_enabled_cores = num_cores;
- } else {
- MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
- }
- }
-
- mali_session_lock();
-
- /* number of sessions will not change while we hold the lock */
- num_sessions_with_lock = mali_session_get_count();
-
- if (num_sessions_alloc >= num_sessions_with_lock) {
- /* We have allocated enough notification objects for all the sessions atm */
- struct mali_session_data *session, *tmp;
- MALI_SESSION_FOREACH(session, tmp, link) {
- MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
- if (NULL != notobjs[used_notification_objects]) {
- mali_session_send_notification(session, notobjs[used_notification_objects]);
- notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
- }
- used_notification_objects++;
- }
- done = MALI_TRUE;
- }
-
- mali_session_unlock();
-
- /* Delete any remaining/unused notification objects */
- for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
- if (NULL != notobjs[used_notification_objects]) {
- _mali_osk_notification_delete(notobjs[used_notification_objects]);
- }
- }
-
- _mali_osk_free(notobjs);
- }
-}
-
-static void mali_pp_scheduler_core_scale_up(unsigned int target_core_nr)
-{
- MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - enabled_cores));
-
- _mali_osk_pm_dev_ref_add_no_power_on();
- _mali_osk_pm_dev_barrier();
-
- while (target_core_nr > enabled_cores) {
- /*
- * If there are any cores which do not belong to any domain,
- * then these will always be found at the head of the list and
-		 * we'll thus enable these first.
- */
-
- mali_pp_scheduler_lock();
-
- if (!_mali_osk_list_empty(&group_list_disabled)) {
- struct mali_group *group;
-
- group = _MALI_OSK_LIST_ENTRY(group_list_disabled.next, struct mali_group, pp_scheduler_list);
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
-
- mali_pp_scheduler_unlock();
-
- mali_pp_scheduler_enable_group_internal(group);
- } else {
- mali_pp_scheduler_unlock();
- break; /* no more groups on disabled list */
- }
- }
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- mali_pp_scheduler_schedule();
-}
-
-static void mali_pp_scheduler_core_scale_down(unsigned int target_core_nr)
-{
- MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, enabled_cores - target_core_nr));
-
- mali_pp_scheduler_suspend();
-
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
-
- _mali_osk_pm_dev_ref_add_no_power_on();
-
- if (NULL != mali_pmu_get_global_pmu_core()) {
- int i;
-
- for (i = MALI_MAX_NUMBER_OF_DOMAINS - 1; i >= 0; i--) {
- if (target_core_nr < enabled_cores) {
- struct mali_pm_domain *domain;
-
- domain = mali_pm_domain_get_from_index(i);
-
- /* Domain is valid and has pp cores */
- if ((NULL != domain) && (NULL != domain->group_list)) {
- struct mali_group *group;
-
- MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) {
- /* If group is pp core */
- if (NULL != mali_group_get_pp_core(group)) {
- mali_pp_scheduler_disable_group_internal(group);
- if (target_core_nr >= enabled_cores) {
- break;
- }
- }
- }
- }
- } else {
- break;
- }
- }
- }
-
- /*
- * Didn't find enough cores associated with a power domain,
- * so we need to disable cores which we can't power off with the PMU.
- * Start with physical groups used by the scheduler,
- * then remove physical from virtual if even more groups are needed.
- */
-
- while (target_core_nr < enabled_cores) {
- mali_pp_scheduler_lock();
- if (!_mali_osk_list_empty(&group_list_idle)) {
- struct mali_group *group;
-
- group = _MALI_OSK_LIST_ENTRY(group_list_idle.next, struct mali_group, pp_scheduler_list);
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_pp_scheduler_unlock();
-
- mali_pp_scheduler_disable_group_internal(group);
- } else {
- mali_pp_scheduler_unlock();
- break; /* No more physical groups */
- }
- }
-
- if (mali_pp_scheduler_has_virtual_group()) {
- while (target_core_nr < enabled_cores) {
- mali_group_lock(virtual_group);
- if (!_mali_osk_list_empty(&virtual_group->group_list)) {
- struct mali_group *group;
-
- group = _MALI_OSK_LIST_ENTRY(virtual_group->group_list.next, struct mali_group, group_list);
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_group_unlock(virtual_group);
-
- mali_pp_scheduler_disable_group_internal(group);
- } else {
- mali_group_unlock(virtual_group);
- break; /* No more physical groups in virtual group */
- }
- }
- }
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- mali_pp_scheduler_resume();
-}
-
-int mali_pp_scheduler_set_perf_level(unsigned int target_core_nr, mali_bool override)
-{
- if (target_core_nr == enabled_cores) return 0;
- if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
- if (target_core_nr > num_cores) return -EINVAL;
- if (0 == target_core_nr) return -EINVAL;
-
- if (target_core_nr > enabled_cores) {
- mali_pp_scheduler_core_scale_up(target_core_nr);
- } else if (target_core_nr < enabled_cores) {
- mali_pp_scheduler_core_scale_down(target_core_nr);
- }
-
- if (target_core_nr != enabled_cores) {
- MALI_DEBUG_PRINT(2, ("Core scaling failed, target number: %d, actual number: %d\n", target_core_nr, enabled_cores));
- }
-
- mali_pp_scheduler_notify_core_change(enabled_cores);
-
- return 0;
-}
-
-void mali_pp_scheduler_core_scaling_enable(void)
-{
-	/* PS: Core scaling is enabled by default */
- core_scaling_enabled = MALI_TRUE;
-}
-
-void mali_pp_scheduler_core_scaling_disable(void)
-{
- core_scaling_enabled = MALI_FALSE;
-}
-
-mali_bool mali_pp_scheduler_core_scaling_is_enabled(void)
-{
- return core_scaling_enabled;
-}
-
-static void mali_pp_scheduler_job_queued(void)
-{
- if (mali_utilization_enabled()) {
- /*
- * We cheat a little bit by counting the PP as busy from the time a PP job is queued.
-		 * This will be fine because we only lose the tiny idle gap between jobs, but
-		 * we will instead get less utilization work to do (fewer locks taken)
- */
- mali_utilization_pp_start();
- }
-}
-
-static void mali_pp_scheduler_job_completed(mali_bool job_started)
-{
- /* Release the PM reference we got in the mali_pp_scheduler_job_queued() function */
- _mali_osk_pm_dev_ref_dec();
-
- if (mali_utilization_enabled() && job_started) {
- mali_utilization_pp_end();
- }
-}
-
-static void mali_pp_scheduler_abort_job_and_unlock_scheduler(struct mali_pp_job *job)
-{
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
- /* This job should not be on any lists. */
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_list));
-
- _mali_osk_list_delinit(&job->session_fb_lookup_list);
-
- mali_pp_scheduler_unlock();
-
- /* Release tracker. */
- mali_timeline_tracker_release(&job->tracker);
-}
-
-static mali_scheduler_mask mali_pp_scheduler_queue_job(struct mali_pp_job *job)
-{
- _mali_osk_list_t *queue = NULL;
- mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
- struct mali_pp_job *iter, *tmp;
-
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT_POINTER(job->session);
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
- if (mali_pp_job_needs_dma_buf_mapping(job)) {
- mali_dma_buf_map_job(job);
- }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
- mali_pp_scheduler_lock();
-
- if (unlikely(job->session->is_aborting)) {
- /* Before checking if the session is aborting, the scheduler must be locked. */
- MALI_DEBUG_ASSERT_LOCK_HELD(pp_scheduler_lock);
-
- MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n", mali_pp_job_get_id(job), job));
-
- mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
-
- /* Delete job. */
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
- mali_pp_scheduler_deferred_job_delete(job);
-#else
- mali_pp_job_delete(job);
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE) */
-
- /* Release the PM reference taken for the job in
- * mali_pp_scheduler_submit_job(). */
- _mali_osk_pm_dev_ref_dec();
-
- /* Since we are aborting we ignore the scheduler mask. */
- return MALI_SCHEDULER_MASK_EMPTY;
- }
-
- mali_pp_scheduler_job_queued();
-
-#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_job_enqueue(mali_pp_job_get_tid(job), mali_pp_job_get_id(job), "PP");
-#endif
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE, job->pid, job->tid, job->uargs.frame_builder_id, job->uargs.flush_id, 0);
-
- job->cache_order = mali_scheduler_get_new_cache_order();
-
- /* Determine which queue the job should be added to. */
- if (mali_pp_job_is_virtual_group_job(job)) {
- if (job->session->use_high_priority_job_queue) {
- queue = &virtual_group_job_queue.high_pri;
- } else {
- queue = &virtual_group_job_queue.normal_pri;
- }
-
- virtual_group_job_queue.depth += 1;
-
- /* Set schedule bitmask if the virtual group is idle. */
- if (VIRTUAL_GROUP_IDLE == virtual_group_state) {
- schedule_mask |= MALI_SCHEDULER_MASK_PP;
- }
- } else {
- if (job->session->use_high_priority_job_queue) {
- queue = &job_queue.high_pri;
- } else {
- queue = &job_queue.normal_pri;
- }
-
- job_queue.depth += mali_pp_job_get_sub_job_count(job);
-
- /* Set schedule bitmask if there are physical PP cores available, or if there is an
- * idle virtual group. */
- if (!_mali_osk_list_empty(&group_list_idle)
- || (mali_pp_scheduler_has_virtual_group()
- && (VIRTUAL_GROUP_IDLE == virtual_group_state))) {
- schedule_mask |= MALI_SCHEDULER_MASK_PP;
- }
- }
-
- /* Find position in queue where job should be added. */
- _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, queue, struct mali_pp_job, list) {
- if (mali_pp_job_should_start_after(job, iter)) {
- break;
- }
- }
-
- /* Add job to queue. */
- _mali_osk_list_add(&job->list, &iter->list);
-
- /* Add job to session list. */
- _mali_osk_list_addtail(&job->session_list, &(job->session->pp_job_list));
-
- MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
- mali_pp_job_is_virtual_group_job(job) ? "Virtual Group" : "Physical",
- mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
-
- mali_pp_scheduler_unlock();
-
- return schedule_mask;
-}
-
-mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job)
-{
- mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
-
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT_POINTER(job->session);
-
- MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", mali_pp_job_get_id(job), job));
-
- if (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & job->tracker.activation_error) {
- MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", mali_pp_job_get_id(job), job));
-
- mali_pp_scheduler_lock();
- mali_pp_scheduler_abort_job_and_unlock_scheduler(job);
-
- mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
- mali_pp_scheduler_finalize_job(job, MALI_FALSE);
-
- return MALI_SCHEDULER_MASK_EMPTY;
- }
-
- /* PP job is ready to run, queue it. */
-
-#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE)
- if (mali_pp_job_needs_dma_buf_mapping(job)) {
- mali_pp_scheduler_deferred_job_queue(job);
-
- return MALI_SCHEDULER_MASK_EMPTY;
- }
-#endif /* defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_QUEUE) */
-
- schedule_mask = mali_pp_scheduler_queue_job(job);
-
- return schedule_mask;
-}
+++ /dev/null
-/*
- * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained from Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __MALI_PP_SCHEDULER_H__
-#define __MALI_PP_SCHEDULER_H__
-
-#include "mali_osk.h"
-#include "mali_pp_job.h"
-#include "mali_group.h"
-#include "linux/mali/mali_utgard.h"
-
-/** Initialize the HW independent parts of the PP scheduler
- */
-_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
-void mali_pp_scheduler_terminate(void);
-
-/** Populate the PP scheduler with groups
- */
-void mali_pp_scheduler_populate(void);
-void mali_pp_scheduler_depopulate(void);
-
-/**
- * @brief Handle job completion.
- *
- * Will attempt to start a new job on the locked group.
- *
- * If all sub jobs have completed, the job's tracker will be released and any other resources associated
- * with the job will be freed. A notification will also be sent to user space.
- *
- * Releasing the tracker might activate other jobs, so if appropriate we also schedule them.
- *
- * @note Group must be locked when entering this function. Will be unlocked before exiting.
- *
- * @param group The group that completed the job.
- * @param job The job that is done.
- * @param sub_job Sub job of job.
- * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
- * @param in_upper_half MALI_TRUE if called from upper half, MALI_FALSE if not.
- */
-void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success, mali_bool in_upper_half);
-
-void mali_pp_scheduler_suspend(void);
-void mali_pp_scheduler_resume(void);
-
-/**
- * @brief Abort all running and queued PP jobs from session.
- *
- * This functions aborts all PP jobs from the specified session. Queued jobs are removed from the
- * queue and jobs currently running on a core will be aborted.
- *
- * @param session Session that is aborting.
- */
-void mali_pp_scheduler_abort_session(struct mali_session_data *session);
-
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by the PP scheduler. This must be
- * called after the Mali HW has been powered on in order to reset the HW.
- *
- * This function is intended for power on reset of all cores.
- * No locking is done, which can only be safe if the scheduler is paused and
- * all cores idle. That is always the case on init and power on.
- */
-void mali_pp_scheduler_reset_all_groups(void);
-
-/**
- * @brief Zap TLB on all groups with \a session active
- *
- * The scheduler will zap the session on all groups it owns.
- */
-void mali_pp_scheduler_zap_all_active(struct mali_session_data *session);
-
-/**
- * @brief Get the virtual PP core
- *
- * The returned PP core may only be used to prepare DMA command buffers for the
- * PP core. Other actions must go through the PP scheduler, or the virtual
- * group.
- *
- * @return Pointer to the virtual PP core, NULL if this doesn't exist
- */
-struct mali_pp_core *mali_pp_scheduler_get_virtual_pp(void);
-
-u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
-
-void mali_pp_scheduler_enable_group(struct mali_group *group);
-void mali_pp_scheduler_disable_group(struct mali_group *group);
-
-/**
- * @brief Used by the Timeline system to queue a PP job.
- *
- * @note @ref mali_scheduler_schedule_from_mask() should be called if this function returns non-zero.
- *
- * @param job The PP job that is being activated.
- *
- * @return A scheduling bitmask that can be used to decide if scheduling is necessary after this
- * call.
- */
-mali_scheduler_mask mali_pp_scheduler_activate_job(struct mali_pp_job *job);
-
-/**
- * @brief Schedule queued jobs on idle cores.
- */
-void mali_pp_scheduler_schedule(void);
-
-int mali_pp_scheduler_set_perf_level(u32 cores, mali_bool override);
-
-void mali_pp_scheduler_core_scaling_enable(void);
-void mali_pp_scheduler_core_scaling_disable(void);
-mali_bool mali_pp_scheduler_core_scaling_is_enabled(void);
-
-u32 mali_pp_scheduler_get_num_cores_total(void);
-u32 mali_pp_scheduler_get_num_cores_enabled(void);
-
-/**
- * @brief Returns the number of Pixel Processors in the system irrespective of the context
- *
- * @return number of physical Pixel Processor cores in the system
- */
-u32 mali_pp_scheduler_get_num_cores_total(void);
-
-#endif /* __MALI_PP_SCHEDULER_H__ */
*/
#include "mali_scheduler.h"
-
#include "mali_kernel_common.h"
#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
-mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
-static _mali_osk_atomic_t mali_job_id_autonumber;
-static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+/*
+ * ---------- static defines/constants ----------
+ */
-static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
-static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
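+
+/*
+ * A brief illustration of the combination above (not driver code): with
+ * CONFIG_DMA_SHARED_BUFFER=y and CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH unset,
+ * both deferral paths are compiled in, so the two operations that might
+ * sleep are pushed out to work queue context:
+ *
+ *	mali_scheduler_complete_pp_job() -> mali_scheduler_deferred_pp_job_delete()
+ *	mali_scheduler_activate_pp_job() -> mali_scheduler_deferred_pp_job_queue()
+ */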
-static void mali_scheduler_wq_schedule_pp(void *arg)
-{
- MALI_IGNORE(arg);
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
- mali_pp_scheduler_schedule();
-}
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
-static void mali_scheduler_wq_schedule_gp(void *arg)
-{
- MALI_IGNORE(arg);
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
- mali_gp_scheduler_schedule();
-}
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+/*
+ * ---------- static variables ----------
+ */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+#endif
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job);
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success);
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_delete(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
_mali_osk_errcode_t mali_scheduler_initialize(void)
{
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
- MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+ _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+ job_queue_gp.depth = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+ job_queue_pp.depth = 0;
+
+ mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == mali_scheduler_lock_obj) {
+		mali_scheduler_terminate();
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_delete, NULL);
+ if (NULL == scheduler_wq_pp_job_delete) {
+ mali_scheduler_terminate();
return _MALI_OSK_ERR_FAULT;
}
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
- MALI_DEBUG_PRINT(1, ("Initialization of atomic job cache order counter failed.\n"));
- _mali_osk_atomic_term(&mali_job_id_autonumber);
+ scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_delete_lock) {
+ mali_scheduler_terminate();
return _MALI_OSK_ERR_FAULT;
}
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
- pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
- if (NULL == pp_scheduler_wq_high_pri) {
- _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
- _mali_osk_atomic_term(&mali_job_id_autonumber);
- return _MALI_OSK_ERR_NOMEM;
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_queue, NULL);
+ if (NULL == scheduler_wq_pp_job_queue) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
}
- gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
- if (NULL == gp_scheduler_wq_high_pri) {
- _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
- _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
- _mali_osk_atomic_term(&mali_job_id_autonumber);
- return _MALI_OSK_ERR_NOMEM;
+ scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_queue_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
}
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
return _MALI_OSK_ERR_OK;
}
void mali_scheduler_terminate(void)
{
- _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
- _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (NULL != scheduler_pp_job_queue_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+ scheduler_pp_job_queue_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_queue) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+ scheduler_wq_pp_job_queue = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ if (NULL != scheduler_pp_job_delete_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+ scheduler_pp_job_delete_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_delete) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+ scheduler_wq_pp_job_delete = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+ if (NULL != mali_scheduler_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+ mali_scheduler_lock_obj = NULL;
+ }
+
_mali_osk_atomic_term(&mali_job_cache_order_autonumber);
_mali_osk_atomic_term(&mali_job_id_autonumber);
}
-u32 mali_scheduler_get_new_id(void)
+u32 mali_scheduler_job_physical_head_count(void)
{
- u32 job_id = _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
- return job_id;
+ /*
+	 * Count how many physical sub jobs are present from the head of the
+	 * queue until the first virtual job is found.
+	 * Early out when we have reached the maximum number of PP cores (8).
+ */
+ u32 count = 0;
+ struct mali_pp_job *job;
+ struct mali_pp_job *temp;
+
+ /* Check for partially started normal pri jobs */
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+			 * Remember: virtual jobs can't be queued and started
+			 * at the same time, so this must be a physical job.
+ */
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ /* any partially started is already counted */
+ if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <=
+ count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ return count;
}
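+
+/*
+ * Worked example for the function above (hypothetical queue contents):
+ * with high_pri = [phys A: 2 unstarted sub jobs] and
+ * normal_pri = [phys B: 3 unstarted, virtual C, phys D: 4 unstarted],
+ * and no partially started job at the head of normal_pri, the count is
+ * 2 (A) + 3 (B) = 5; counting stops at virtual job C, so D is ignored.
+ */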
-u32 mali_scheduler_get_new_cache_order(void)
+mali_bool mali_scheduler_job_next_is_virtual(void)
{
- u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
- return job_cache_order;
+ struct mali_pp_job *job;
+
+ job = mali_scheduler_job_pp_virtual_peek();
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
}
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
{
- if (MALI_SCHEDULER_MASK_GP & mask) {
- /* GP needs scheduling. */
- if (deferred_schedule) {
- /* Schedule GP deferred. */
- _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
- } else {
- /* Schedule GP now. */
- mali_gp_scheduler_schedule();
+ _mali_osk_list_t *queue;
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_gp_job_list_remove(job);
+ job_queue_gp.depth--;
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ /*
+	 * For PP jobs we favour partially started jobs in the normal
+	 * priority queue over unstarted jobs in the high priority queue.
+ */
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job ||
+ MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+		 * There isn't a partially started job in the normal queue,
+		 * so look in the high priority queue.
+ */
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
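+
+/*
+ * Example of the rule above (hypothetical queue contents): with
+ * high_pri = [phys A, unstarted] and normal_pri = [phys B, 2 of 4 sub
+ * jobs started], the peek returns B rather than A, presumably so that a
+ * partially started job can finish and release its resources before
+ * fresh jobs are started.
+ */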
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job) {
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ mali_pp_job_mark_sub_job_started(job, *sub_job);
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* Remove from queue when last sub job has been retrieved */
+ mali_pp_job_list_remove(job);
+ }
+
+ job_queue_pp.depth--;
+
+ /*
+		 * The job is about to start, so it is no longer
+		 * possible to discard WB.
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(0 ==
+ mali_pp_job_get_first_unstarted_sub_job(job));
+ MALI_DEBUG_ASSERT(1 ==
+ mali_pp_job_get_sub_job_count(job));
+
+ mali_pp_job_mark_sub_job_started(job, 0);
+
+ mali_pp_job_list_remove(job);
+
+ job_queue_pp.depth--;
+
+ /*
+		 * The job is about to start, so it is no longer
+		 * possible to discard WB.
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
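+
+/*
+ * A minimal usage sketch for the peek/get functions above (illustration
+ * only; the real caller is the executor, which lives outside this file):
+ *
+ *	u32 sub_job = 0;
+ *	struct mali_pp_job *job = NULL;
+ *
+ *	mali_scheduler_lock();
+ *	if (mali_scheduler_job_next_is_virtual()) {
+ *		job = mali_scheduler_job_pp_virtual_get();
+ *	} else if (NULL != mali_scheduler_job_pp_physical_peek()) {
+ *		job = mali_scheduler_job_pp_physical_get(&sub_job);
+ *	}
+ *	mali_scheduler_unlock();
+ */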
+
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_gp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_gp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, MALI_FALSE,
+ MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+
+ return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_pp_job_get_id(job), job));
+
+ if (MALI_TRUE == mali_timeline_tracker_activation_error(
+ mali_pp_job_get_tracker(job))) {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+ mali_pp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_scheduler_deferred_pp_job_queue(job);
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_pp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+ return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_gp_job_to_user(job, success);
+ }
+
+ if (dequeued) {
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+ }
+
+ mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_pp_job_to_user(job,
+ num_cores_in_virtual);
+ }
+
+ if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+ if (mali_pp_job_is_window_surface(job)) {
+ struct mali_session_data *session;
+ session = mali_pp_job_get_session(job);
+ mali_session_inc_num_window_jobs(session);
+ }
+#endif
+
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_pp_end();
+ }
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ /*
+ * The deletion of the job object (releasing sync refs etc)
+ * must be done in a different context
+ */
+ mali_scheduler_deferred_pp_job_delete(job);
+#else
+ /* no use cases need this in this configuration */
+ mali_pp_job_delete(job);
+#endif
+}
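+
+/*
+ * Summary of the flag pairs used with the two completion functions above
+ * (the normal-completion call site lives in the executor and is assumed
+ * here; the other two appear later in this file):
+ *
+ *	completed normally:  user_notification=MALI_TRUE,  dequeued=MALI_TRUE
+ *	session abort:       user_notification=MALI_FALSE, dequeued=MALI_TRUE
+ *	activation error:    user_notification=MALI_TRUE,  dequeued=MALI_FALSE
+ *
+ * Only dequeued jobs drop the PM reference and stop utilization
+ * accounting, since only queued jobs ever took them.
+ */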
+
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_gp_job *gp_tmp;
+ struct mali_pp_job *pp_job;
+ struct mali_pp_job *pp_tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+ session));
+
+ mali_scheduler_lock();
+
+ /* Remove from GP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ }
+ }
+
+ /* Remove from GP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ }
+ }
+
+ /* Remove from PP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ }
+ }
+ }
+
+ /* Remove from PP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ }
+ }
+ }
+
+ /*
+ * Release scheduler lock so we can release trackers
+ * (which will potentially queue new jobs)
+ */
+ mali_scheduler_unlock();
+
+ /* Release and complete all (non-running) found GP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+ struct mali_gp_job, list) {
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+ mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+ _mali_osk_list_delinit(&gp_job->list);
+ mali_scheduler_complete_gp_job(gp_job,
+ MALI_FALSE, MALI_FALSE, MALI_TRUE);
+ }
+
+ /* Release and complete non-running PP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+ struct mali_pp_job, list) {
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+ _mali_osk_list_delinit(&pp_job->list);
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+ _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+ NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_gp_job(session, job);
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+ _mali_uk_pp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_pp_job(session, job);
+ job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+ _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *point_ptr = NULL;
+ mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *) ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+ sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args,
+ mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_job = mali_gp_job_create(session, gp_args,
+ mali_scheduler_get_new_id(),
+ mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+ /* Submit GP job. */
+ mali_scheduler_submit_gp_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ point = mali_scheduler_submit_pp_job(session, pp_job);
+ pp_job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_scheduler_lock();
+
+ /* Iterate over all jobs for given frame builder_id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+ &session->pp_job_fb_lookup_list[fb_lookup_id],
+ struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) !=
+ (u32) args->fb_id) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ continue;
+ }
+
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+ if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+
+ if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
}
+
+ if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+ disable_mask));
}
- if (MALI_SCHEDULER_MASK_PP & mask) {
- /* PP needs scheduling. */
- if (deferred_schedule) {
- /* Schedule PP deferred. */
- _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+ mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_gp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+ "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+ "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "PP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_pp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.normal_pri)
+ ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.high_pri)
+ ? "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
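+
+/*
+ * For reference, the dump above produces output of roughly this shape
+ * (values are made up):
+ *
+ *	GP queues
+ *		Queue depth: 1
+ *		Normal priority queue is not empty
+ *		High priority queue is empty
+ *	PP queues
+ *		Queue depth: 4
+ *		Normal priority queue is not empty
+ *		High priority queue is empty
+ */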
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+ return point;
+}
+
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_scheduler_lock();
+ /*
+	 * Add the job to the lookup list used to quickly discard
+ * writeback units of queued jobs.
+ */
+ mali_pp_job_fb_lookup_add(job);
+ mali_scheduler_unlock();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ return point;
+}
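+
+/*
+ * The returned timeline point is what the _mali_ukk_*_start_job()
+ * functions copy back to user space; a sketch of the round trip (the
+ * user-space side is an assumption, not shown in this file):
+ *
+ *	point = mali_scheduler_submit_pp_job(session, job);
+ *	_mali_osk_put_user((u32)point, point_ptr);
+ *	(user space may later wait on this point via the Timeline API)
+ */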
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_gp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ /* Determine which queue the job should be added to. */
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ }
+
+ job_queue_gp.depth += 1;
+
+	/* Add job to queue (mali_gp_job_list_add finds the correct place). */
+ mali_gp_job_list_add(job, queue);
+
+ /*
+	 * We hold a PM reference for every job we hold queued (and running).
+	 * It is important that we take this reference after the job has been
+	 * added to the queue, so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the
+ * time a GP job is queued. This will be fine because we only
+		 * lose the tiny idle gap between jobs, but we will instead
+		 * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+ mali_gp_job_get_pid(job),
+ mali_gp_job_get_tid(job),
+ mali_gp_job_get_frame_builder_id(job),
+ mali_gp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+ mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+ mali_gp_job_get_id(job), job));
+
+ return MALI_TRUE; /* job queued */
+}
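+
+/*
+ * Reference pairing for the code above: every successfully queued job
+ * holds one PM reference, taken with _mali_osk_pm_dev_ref_get_async()
+ * here and released with _mali_osk_pm_dev_ref_put() when the job is
+ * completed with dequeued == MALI_TRUE (see
+ * mali_scheduler_complete_gp_job() and mali_scheduler_complete_pp_job()).
+ */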
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue = NULL;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_pp.high_pri;
+ } else {
+ queue = &job_queue_pp.normal_pri;
+ }
+
+ job_queue_pp.depth +=
+ mali_pp_job_get_sub_job_count(job);
+
+	/* Add job to queue (mali_pp_job_list_add finds the correct place). */
+ mali_pp_job_list_add(job, queue);
+
+ /*
+	 * We hold a PM reference for every job we hold queued (and running).
+	 * It is important that we take this reference after the job has been
+	 * added to the queue, so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the
+ * time a PP job is queued. This will be fine because we only
+		 * lose the tiny idle gap between jobs, but we will instead
+		 * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+ mali_pp_job_get_pid(job),
+ mali_pp_job_get_tid(job),
+ mali_pp_job_get_frame_builder_id(job),
+ mali_pp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+ mali_pp_job_get_id(job), "PP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual(job)
+ ? "Virtual" : "Physical",
+ mali_pp_job_get_id(job), job,
+ mali_pp_job_get_sub_job_count(job)));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_gp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual)
+{
+ u32 i;
+ u32 num_counters_to_copy;
+ _mali_uk_pp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+ return;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_pp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == mali_pp_job_was_success(job)) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_virtual(job)) {
+ num_counters_to_copy = num_cores_in_virtual;
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+ for (i = 0; i < num_counters_to_copy; i++) {
+ jobres->perf_counter0[i] =
+ mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] =
+ mali_pp_job_get_perf_counter_value1(job, i);
+ jobres->perf_counter_src0 =
+ mali_pp_job_get_pp_counter_global_src0();
+ jobres->perf_counter_src1 =
+ mali_pp_job_get_pp_counter_global_src1();
+ }
+
+ mali_session_send_notification(session, notification);
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+static void mali_scheduler_do_pp_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release
+ * the lock before we start deleting the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ _mali_osk_list_delinit(&job->list);
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release
+ * the lock before we start queueing the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ /* First loop through all jobs and do the pre-work (no locks needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ /*
+ * This operation could fail, but we continue anyway,
+ * because the worst that could happen is that this
+ * job will fail due to a Mali page fault.
+ */
+ mali_dma_buf_map_job(job);
+ }
+ }
+
+ mali_scheduler_lock();
+
+ /* Then loop through all jobs again to queue them (lock needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ /* Remove from scheduler_pp_job_queue_list before queueing */
+ mali_pp_job_list_remove(job);
+
+ if (mali_scheduler_queue_pp_job(job)) {
+ /* Job queued successfully */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
} else {
- /* Schedule PP now. */
- mali_pp_scheduler_schedule();
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+
+ /* unlock scheduler in this uncommon case */
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(
+ mali_pp_job_get_tracker(job));
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+ MALI_FALSE);
+
+ mali_scheduler_lock();
}
}
+
+ mali_scheduler_unlock();
+
+ /* Trigger scheduling of jobs */
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
#define __MALI_SCHEDULER_H__
#include "mali_osk.h"
+#include "mali_osk_list.h"
#include "mali_scheduler_types.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+ _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */
+ u32 depth; /* Depth of combined queues. */
+};
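+
+/*
+ * Note that depth does not count jobs: for PP it counts sub jobs (one
+ * physical job split four ways contributes 4, see
+ * mali_scheduler_queue_pp_job()), while a GP job always contributes 1.
+ */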
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
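+
+/*
+ * A minimal usage sketch for the assert above (hypothetical helper, for
+ * illustration only):
+ *
+ *	static void example_touch_queues(void)
+ *	{
+ *		MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ *		... safe to read job_queue_gp / job_queue_pp here ...
+ *	}
+ */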
_mali_osk_errcode_t mali_scheduler_initialize(void);
void mali_scheduler_terminate(void);
-u32 mali_scheduler_get_new_id(void);
-u32 mali_scheduler_get_new_cache_order(void);
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by both the PP and GP schedulers.
- * This must be called after the Mali HW has been powered on in order to reset
- * the HW.
- */
-MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
{
- mali_gp_scheduler_reset_all_groups();
- mali_pp_scheduler_reset_all_groups();
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+ _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
}
-/**
- * @brief Zap TLB on all active groups running \a session
- *
- * @param session Pointer to the session to zap
- */
-MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
{
- mali_gp_scheduler_zap_all_active(session);
- mali_pp_scheduler_zap_all_active(session);
+ return job_queue_gp.depth;
+}
+
+u32 mali_scheduler_job_physical_head_count(void);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
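+
+/*
+ * By naming convention the _peek variants above return the next job
+ * without dequeuing it, while the _get variants also remove it from
+ * the queue.
+ */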
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
}
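+
+/*
+ * Both ID generators use atomic counters, so new job IDs and cache
+ * orders can be handed out without taking the scheduler lock.
+ */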
/**
- * Check if bit is set in scheduler mask.
+ * @brief Used by the Timeline system to queue a GP job.
*
- * @param mask Scheduler mask to check.
- * @param bit Bit to check.
- * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
*/
-MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
-{
- return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
-}
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
/**
- * Schedule GP and PP according to bitmask.
+ * @brief Used by the Timeline system to queue a PP job.
*
- * @param mask A scheduling bitmask.
- * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
*/
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
-/* Enable or disable scheduler hint. */
-extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued);
-MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- mali_scheduler_hints[hint] = MALI_TRUE;
-}
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued);
-MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- mali_scheduler_hints[hint] = MALI_FALSE;
-}
+void mali_scheduler_abort_session(struct mali_session_data *session);
-MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- return mali_scheduler_hints[hint];
-}
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
#endif /* __MALI_SCHEDULER_H__ */
#define MALI_SCHEDULER_MASK_EMPTY 0
#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
-typedef enum {
- MALI_SCHEDULER_HINT_GP_BOUND = 0
-#define MALI_SCHEDULER_HINT_MAX 1
-} mali_scheduler_hint;
-
#endif /* __MALI_SCHEDULER_TYPES_H__ */
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_session.h"
+#include "mali_ukk.h"
_MALI_OSK_LIST_HEAD(mali_sessions);
static u32 mali_session_count = 0;
-_mali_osk_spinlock_irq_t *mali_sessions_lock;
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
_mali_osk_errcode_t mali_session_initialize(void)
{
_MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
- mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
-
- if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SESSIONS);
+ if (NULL == mali_sessions_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
return _MALI_OSK_ERR_OK;
}
void mali_session_terminate(void)
{
- _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ if (NULL != mali_sessions_lock) {
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ mali_sessions_lock = NULL;
+ }
}
void mali_session_add(struct mali_session_data *session)
 * Get the max number of completed window jobs across all active sessions,
 * used when calculating window-render frames per second
*/
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
u32 mali_session_max_window_num(void)
{
struct mali_session_data *session, *tmp;
mali_session_lock();
MALI_SESSION_FOREACH(session, tmp, link) {
- tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+ tmp_number = _mali_osk_atomic_xchg(
+ &session->number_of_window_jobs, 0);
if (max_window_num < tmp_number) {
max_window_num = tmp_number;
}
return max_window_num;
}
#endif
+
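+/*
+ * Dump every session's memory usage through the given print context; the
+ * columns correspond to the header printed by the memory debugfs entry.
+ */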
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+ struct mali_session_data *session, *tmp;
+ u32 mali_mem_usage;
+ u32 total_mali_mem_size;
+
+ MALI_DEBUG_ASSERT_POINTER(print_ctx);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u\n",
+ session->comm, session->pid,
+ session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK], session->max_mali_mem_allocated,
+ session->mali_mem_array[MALI_MEM_EXTERNAL], session->mali_mem_array[MALI_MEM_UMP],
+ session->mali_mem_array[MALI_MEM_DMA_BUF]);
+ }
+ mali_session_unlock();
+ mali_mem_usage = _mali_ukk_report_memory_usage();
+ total_mali_mem_size = _mali_ukk_report_total_memory_size();
+ _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+}
\ No newline at end of file
#include "mali_kernel_descriptor_mapping.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
+#include "mali_memory_types.h"
struct mali_timeline_system;
struct mali_soft_system;
_MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
_MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
_mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
#endif
mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+ u32 pid;
+ char *comm;
+ size_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record all mali mem types' usage for this session. */
+ size_t max_mali_mem_allocated; /**< The past max mali memory usage for this session. */
};
_mali_osk_errcode_t mali_session_initialize(void);
return session->page_directory;
}
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_signal(session->memory_lock);
+}
+
MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
{
_mali_osk_notification_queue_send(session->ioctl_queue, object);
}
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
/*
 * Get the max number of completed window jobs across all active sessions,
 * used when calculating window-render frames per second
*/
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
u32 mali_session_max_window_num(void);
+
#endif
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
#endif /* __MALI_SESSION_H__ */
#include "mali_soft_job.h"
#include "mali_osk.h"
-#include "mali_osk_mali.h"
#include "mali_timeline.h"
#include "mali_session.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
{
MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
schedule_mask = mali_timeline_tracker_release(&job->tracker);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
mali_soft_job_destroy(job);
mali_soft_job_system_unlock(job->system);
schedule_mask = mali_timeline_tracker_release(&job->tracker);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
mali_soft_job_destroy(job);
} else {
#include "mali_timeline.h"
#include "mali_kernel_common.h"
-#include "mali_osk_mali.h"
#include "mali_scheduler.h"
#include "mali_soft_job.h"
#include "mali_timeline_fence_wait.h"
#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+/*
+ * The following three counters record how many GP, physical PP and
+ * virtual PP jobs are currently delayed in the timeline system; they
+ * are used to decide whether idle groups should be deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
+
static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
struct mali_timeline_waiter *waiter);
mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
u32 tid = _mali_osk_get_tid();
mali_bool is_aborting = MALI_FALSE;
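+	/* Since Linux 3.17 the sync_fence status field is an atomic_t, hence the atomic_read() in the 3.17+ branch below. */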
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
int fence_status = sync_fence->status;
+#else
+ int fence_status = atomic_read(&sync_fence->status);
+#endif
MALI_DEBUG_ASSERT_POINTER(sync_fence);
MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
MALI_DEBUG_ASSERT_POINTER(waiter);
tracker->sync_fence = NULL;
+ tracker->fence.sync_fd = -1;
+
schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
/* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
if (!is_aborting) {
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
}
}
#endif /* defined(CONFIG_SYNC) */
mali_spinlock_reentrant_signal(system->spinlock, tid);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
return NULL;
}
- timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+ timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
if (NULL == timeline->sync_tl) {
mali_timeline_destroy(timeline);
return NULL;
MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+ _mali_osk_atomic_inc(&gp_tracker_count);
+ } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_inc(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_inc(&phy_pp_tracker_count);
+ }
+ }
+
/* Add tracker as new head on timeline's tracker list. */
if (NULL == timeline->tracker_head) {
/* Tracker list is empty. */
switch (tracker->type) {
case MALI_TIMELINE_TRACKER_GP:
- schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+ schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+ _mali_osk_atomic_dec(&gp_tracker_count);
break;
case MALI_TIMELINE_TRACKER_PP:
- schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+ schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_dec(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_dec(&phy_pp_tracker_count);
+ }
break;
case MALI_TIMELINE_TRACKER_SOFT:
timeline = tracker->timeline;
}
#if defined(CONFIG_SYNC)
- system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+ system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
if (NULL == system->signaled_sync_tl) {
mali_timeline_system_destroy(system);
return NULL;
ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
if (1 == ret) {
/* Fence already signaled, no waiter needed. */
+ tracker->fence.sync_fd = -1;
goto exit;
} else if (0 != ret) {
MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
}
#endif /* defined(CONFIG_SYNC) */
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
return point;
}
+void mali_timeline_initialize(void)
+{
+ _mali_osk_atomic_init(&gp_tracker_count, 0);
+ _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+ _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+}
+
+void mali_timeline_terminate(void)
+{
+ _mali_osk_atomic_term(&gp_tracker_count);
+ _mali_osk_atomic_term(&phy_pp_tracker_count);
+ _mali_osk_atomic_term(&virt_pp_tracker_count);
+}
+
#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
MALI_DEBUG_ASSERT_POINTER(timeline->system);
system = timeline->system;
- if (MALI_TIMELINE_MAX > id ) {
- if(MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ if (MALI_TIMELINE_MAX > id) {
+ if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
} else {
return MALI_FALSE;
state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
_mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
- if (0 != tracker->trigger_ref_count) {
#if defined(CONFIG_SYNC)
- _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u, fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
- tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
} else {
_mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
- tracker_type, tracker->point, state_char,
- tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
}
#else
- _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
- tracker->job);
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->job);
} else {
_mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
- tracker_type, tracker->point, state_char,
- tracker->job);
+ tracker_type, tracker->point, state_char,
+ tracker->job);
}
#endif
}
if (NULL == timeline->tracker_head) continue;
_mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
- timeline_id_to_string((enum mali_timeline_id)i));
+ timeline_id_to_string((enum mali_timeline_id)i));
mali_timeline_debug_print_timeline(timeline, print_ctx);
num_printed++;
* Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
* but in order to guarantee that every soft job is completed, we also have a timer.
*/
-#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
/**
* Timeline type.
void *job; /**< Owner of tracker. */
/* The following fields are used to time out soft job trackers. */
- u32 os_tick_create;
- u32 os_tick_activate;
+ unsigned long os_tick_create;
+ unsigned long os_tick_activate;
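+	/* unsigned long matches the width of kernel jiffies and avoids truncation on 64-bit builds */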
mali_bool timer_active;
};
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
/**
* What follows is a set of functions to check the state of a timeline and to determine where on a
* timeline a given point is. Most of these checks will translate the timeline so the oldest point
*/
mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+ struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+ tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
/**
* Copy data from a UK fence to a Timeline fence.
*
*/
void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+void mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
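+
+/*
+ * The three helpers above expose the tracker counters defined in
+ * mali_timeline.c: a non-zero count means jobs of that type are still
+ * delayed on a timeline, which the executor can use when deciding
+ * whether idle groups should be deactivated.
+ */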
+
#if defined(DEBUG)
#define MALI_TIMELINE_DEBUG_FUNCTIONS
#endif /* DEBUG */
if (-1 != fence->sync_fd) {
sync_fence = sync_fence_fdget(fence->sync_fd);
if (likely(NULL != sync_fence)) {
- if (0 == sync_fence->status) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	if (0 == sync_fence->status) {
+#else
+ if (0 == atomic_read(&sync_fence->status)) {
+#endif
ret = MALI_FALSE;
}
} else {
u32 _mali_ukk_report_memory_usage(void);
+u32 _mali_ukk_report_total_memory_size(void);
+
u32 _mali_ukk_utilization_gp_pp(void);
u32 _mali_ukk_utilization_gp(void);
#define MALI_GPU_NAME_UTGARD "mali-utgard"
-/* Mali-200 */
-#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
- MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
- MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
- MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+#define MALI_OFFSET_GP 0x00000
+#define MALI_OFFSET_GP_MMU 0x03000
+
+#define MALI_OFFSET_PP0 0x08000
+#define MALI_OFFSET_PP0_MMU 0x04000
+#define MALI_OFFSET_PP1 0x0A000
+#define MALI_OFFSET_PP1_MMU 0x05000
+#define MALI_OFFSET_PP2 0x0C000
+#define MALI_OFFSET_PP2_MMU 0x06000
+#define MALI_OFFSET_PP3 0x0E000
+#define MALI_OFFSET_PP3_MMU 0x07000
+
+#define MALI_OFFSET_PP4 0x28000
+#define MALI_OFFSET_PP4_MMU 0x1C000
+#define MALI_OFFSET_PP5 0x2A000
+#define MALI_OFFSET_PP5_MMU 0x1D000
+#define MALI_OFFSET_PP6 0x2C000
+#define MALI_OFFSET_PP6_MMU 0x1E000
+#define MALI_OFFSET_PP7 0x2E000
+#define MALI_OFFSET_PP7_MMU 0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0 0x01000
+#define MALI_OFFSET_L2_RESOURCE1 0x10000
+#define MALI_OFFSET_L2_RESOURCE2 0x11000
+
+#define MALI400_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2
+
+#define MALI_OFFSET_BCAST 0x13000
+#define MALI_OFFSET_DLBU 0x14000
+
+#define MALI_OFFSET_PP_BCAST 0x16000
+#define MALI_OFFSET_PP_BCAST_MMU 0x15000
+
+#define MALI_OFFSET_PMU 0x02000
+#define MALI_OFFSET_DMA 0x12000
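+
+/*
+ * These offsets mirror the Utgard register map: the GP, PMU, L2 caches,
+ * broadcast/DLBU units and PP0-PP3 occupy the lower block, while PP4-PP7
+ * and their MMUs sit in a second block starting at 0x1C000.
+ */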
/* Mali-300 */
/* Mali-400 */
#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+	MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
/* Mali-450 */
#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
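+
+/*
+ * Note that on an MP6 the second group's cores, logical PP3-PP5 above,
+ * live at the PP4-PP6 register offsets, with their MMUs following suit.
+ */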
#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCE_L2(addr) \
{ \
.name = "Mali_L2", \
.start = pp_irq, \
.end = pp_irq, \
}, \
-
+
#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
{ \
.name = "Mali_PP_MMU_Broadcast", \
unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- unsigned int number_of_window_jobs;
- unsigned int number_of_window_jobs_under_pressure;
-#endif
};
-struct mali_gpu_device_data {
- /* Dedicated GPU memory range (physical). */
- unsigned long dedicated_mem_start;
- unsigned long dedicated_mem_size;
+struct mali_gpu_clk_item {
+	unsigned int clock; /* clock rate, in MHz */
+	unsigned int vol; /* matching voltage; unit is platform defined */
+};
+
+struct mali_gpu_clock {
+ struct mali_gpu_clk_item *item;
+ unsigned int num_of_steps;
+};
+struct mali_gpu_device_data {
/* Shared GPU memory */
unsigned long shared_mem_size;
- /* Frame buffer memory to be accessible by Mali GPU (physical) */
- unsigned long fb_start;
- unsigned long fb_size;
-
- /* Max runtime [ms] for jobs */
- int max_job_runtime;
-
- /* Report GPU utilization in this interval (specified in ms) */
- unsigned long utilization_interval;
-
- /* Function that will receive periodic GPU utilization numbers */
- void (*utilization_callback)(struct mali_gpu_utilization_data *data);
-
/*
* Mali PMU switch delay.
* Only needed if the power gates are connected to the PMU in a high fanout
*/
u32 pmu_switch_delay;
-
/* Mali Dynamic power domain configuration in sequence from 0-11
* GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7, L2$0 L2$1 L2$2
*/
u16 pmu_domain_config[12];
- /* Fuction that platform callback for freq tunning, needed when POWER_PERFORMANCE_POLICY enabled*/
- int (*set_freq_callback)(unsigned int mhz);
-};
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * called to power down all cores
- */
-int mali_pmu_powerdown(void);
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+ /* Max runtime [ms] for jobs */
+ int max_job_runtime;
-/** @brief MALI GPU power up using MALI in-built PMU
- *
- * called to power up all cores
- */
-int mali_pmu_powerup(void);
+	/* Interval (in ms) at which GPU utilization is reported and related control work is run */
+ unsigned long control_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+	/* Platform callback for setting the clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*set_freq)(int setting_clock_step);
+	/* Platform callback reporting the clock steps the driver may select; needed when CONFIG_MALI_DVFS is enabled */
+ void (*get_clock_info)(struct mali_gpu_clock **data);
+	/* Platform callback returning the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*get_freq)(void);
+};
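+
+/*
+ * A minimal platform sketch, with purely illustrative names and values
+ * (none of them part of the driver), of how the DVFS callbacks above
+ * might be wired up when CONFIG_MALI_DVFS is enabled; ARRAY_SIZE comes
+ * from <linux/kernel.h>:
+ *
+ *     static struct mali_gpu_clk_item my_clk_table[] = {
+ *             { .clock = 160, .vol = 1100000 },
+ *             { .clock = 320, .vol = 1150000 },
+ *     };
+ *     static struct mali_gpu_clock my_clk_info = {
+ *             .item = my_clk_table,
+ *             .num_of_steps = ARRAY_SIZE(my_clk_table),
+ *     };
+ *     static int my_step; // current index into my_clk_table
+ *
+ *     static void my_get_clock_info(struct mali_gpu_clock **data)
+ *     {
+ *             *data = &my_clk_info;
+ *     }
+ *     static int my_get_freq(void)
+ *     {
+ *             return my_step;
+ *     }
+ *     static int my_set_freq(int setting_clock_step)
+ *     {
+ *             // program the PLL/regulator for
+ *             // my_clk_table[setting_clock_step] here
+ *             my_step = setting_clock_step;
+ *             return 0;
+ *     }
+ *
+ * The voltage unit is not defined here; note that the driver divides
+ * vol by 1000 before reporting it to the profiling channel.
+ */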
/**
* Pause the scheduling and power state changes of Mali device driver.
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43,
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44,
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */
} cinstr_profiling_event_reason_suspend_resume_sw_t;
/**
MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+ MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU = 22, /* GP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU = 26, /* PP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU = 27, /* PP1 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU = 28, /* PP2 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU = 29, /* PP3 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU = 30, /* PP4 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU = 31, /* PP5 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU = 32, /* PP6 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU = 33, /* PP7 + 21 */
+
} cinstr_profiling_event_data_core_t;
#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
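+
+/*
+ * Each MMU data-core ID is its core's ID plus 21; the two _MMU macros
+ * above rely on that fixed offset.
+ */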
#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
#include <linux/module.h>
#include <linux/mali/mali_utgard.h>
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
void mali_dev_pause(void)
{
- mali_gp_scheduler_suspend();
- mali_pp_scheduler_suspend();
- mali_group_power_off(MALI_FALSE);
- mali_l2_cache_pause_all(MALI_TRUE);
+ /*
+	 * Deactivate all groups so the hardware is not touched while the
+	 * Mali device is paused. MALI_FALSE marks this as a pause rather
+	 * than a real OS suspend (which passes MALI_TRUE).
+ */
+ mali_pm_os_suspend(MALI_FALSE);
}
EXPORT_SYMBOL(mali_dev_pause);
void mali_dev_resume(void)
{
- mali_l2_cache_pause_all(MALI_FALSE);
- mali_gp_scheduler_resume();
- mali_pp_scheduler_resume();
+ mali_pm_os_resume();
}
EXPORT_SYMBOL(mali_dev_resume);
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/bug.h>
+#include <linux/of.h>
+
#include <linux/mali/mali_utgard.h>
#include "mali_kernel_common.h"
#include "mali_session.h"
#include "mali_kernel_license.h"
#include "mali_memory.h"
#include "mali_memory_dma_buf.h"
+#include <meson_main.h>
#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
#include "mali_profiling_internal.h"
#endif
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+static int is_first_resume = 1;
+/* Store the clock and voltage used at boot/insmod and at mali_resume */
+static struct mali_gpu_clk_item mali_gpu_clk[2];
+#endif
/* Streamline support for the Mali driver */
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
/* from the __malidrv_build_info.c file that is generated during build */
extern const char *__malidrv_build_info(void);
-extern void mali_post_init(void);
-extern int mali_pdev_dts_init(struct platform_device* mali_gpu_device);
/* Module parameter to control log level */
int mali_debug_level = 2;
module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
/** the max fps the same as display vsync default 60, can set by module insert parameter */
extern int mali_max_system_fps;
module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
#endif
#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
extern int mali_platform_device_register(void);
extern int mali_platform_device_unregister(void);
#endif
+#endif
/* Linux power management operations provided by the Mali device driver */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
};
#endif
-#ifdef CONFIG_USE_OF
-static const struct of_device_id amlogic_mesonstream_dt_match[]={
- { .compatible = "arm,mali",
- },
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+ {.compatible = "arm,mali-300"},
+ {.compatible = "arm,mali-400"},
+ {.compatible = "arm,mali-450"},
+ {.compatible = "arm,mali-utgard"},
{},
};
-#else
-#define amlogic_mesonstream_dt_match NULL
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
#endif
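+
+/*
+ * A hypothetical device-tree node matched by the table above (all
+ * property values are placeholders; the exact binding is platform
+ * specific):
+ *
+ *     gpu@d00c0000 {
+ *             compatible = "arm,mali-450";
+ *             reg = <0xd00c0000 0x40000>;
+ *     };
+ */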
/* The Mali device driver struct */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
.pm = &mali_dev_pm_ops,
#endif
-
-#ifdef CONFIG_USE_OF
- .of_match_table = amlogic_mesonstream_dt_match,
+#ifdef CONFIG_MALI_DT
+ .of_match_table = of_match_ptr(base_dt_ids),
#endif
},
};
.mmap = mali_mmap
};
-
#if MALI_ENABLE_CPU_CYCLES
void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
{
#endif
/* Initialize module wide settings */
-#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
err = mali_platform_device_register();
if (0 != err) {
return err;
}
+#endif
#endif
MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
if (0 != err) {
MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
-#if defined(MALI_FAKE_PLATFORM_DEVICE)
- mali_platform_device_unregister();
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ mali_platform_device_unregister();
+#endif
#endif
mali_platform_device = NULL;
return err;
}
#endif
+	/* Trace the current frequency and voltage from boot/insmod */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Call mali_get_current_gpu_clk_item() to record the current clock info. */
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[0].clock,
+ mali_gpu_clk[0].vol / 1000,
+ 0, 0, 0);
+#endif
+
MALI_PRINT(("Mali device driver loaded\n"));
mpgpu_class_init();
MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
-#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
- _mali_internal_profiling_term();
-#endif
-
platform_driver_unregister(&mali_platform_driver);
#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
mali_platform_device_unregister();
+#endif
#endif
+	/* Trace the frequency and voltage at rmmod */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
+#endif
mpgpu_class_exit();
MALI_PRINT(("Mali device driver unloaded\n"));
mali_platform_device = pdev;
-#ifndef MALI_FAKE_PLATFORM_DEVICE
- if (mali_pdev_dts_init(pdev) < 0)
- return -ENOMEM;
+#ifdef CONFIG_MALI_DT
+	/* When the DT is used to initialize the DDK, the platform device must be prepared first. */
+ err = mali_platform_device_init(mali_platform_device);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+ return -EFAULT;
+ }
#endif
if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
if (0 == err) {
/* Setup sysfs entries */
err = mali_sysfs_register(mali_dev_name);
+
if (0 == err) {
mali_post_init();
MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
return 0;
} else {
MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
mali_miscdevice_unregister();
mali_terminate_subsystems();
_mali_osk_wq_term();
+#ifdef CONFIG_MALI_DT
+ mali_platform_device_deinit(mali_platform_device);
+#endif
mali_platform_device = NULL;
return 0;
}
static int mali_driver_suspend_scheduler(struct device *dev)
{
- mali_pm_os_suspend();
+ mali_pm_os_suspend(MALI_TRUE);
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
return 0;
}
static int mali_driver_resume_scheduler(struct device *dev)
{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Call mali_get_current_gpu_clk_item() only once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
mali_pm_os_resume();
return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int mali_driver_runtime_suspend(struct device *dev)
{
- mali_pm_runtime_suspend();
- return 0;
+ if (MALI_TRUE == mali_pm_runtime_suspend()) {
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+ return 0;
+ } else {
+ return -EBUSY;
+ }
}
static int mali_driver_runtime_resume(struct device *dev)
{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Call mali_get_current_gpu_clk_item() only once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+
mali_pm_runtime_resume();
return 0;
}
#include "mali_profiling_internal.h"
#include "mali_gp_job.h"
#include "mali_pp_job.h"
-#include "mali_pp_scheduler.h"
-#include "mali_session.h"
+#include "mali_executor.h"
#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
group = (struct mali_group *)filp->private_data;
MALI_DEBUG_ASSERT_POINTER(group);
- r = snprintf(buffer, 64, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+ r = snprintf(buffer, 64, "%u\n",
+ mali_executor_group_is_disabled(group) ? 0 : 1);
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
}
buffer[count] = '\0';
- r = strict_strtoul(&buffer[0], 10, &val);
+ r = kstrtoul(&buffer[0], 10, &val);
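+	/* kstrtoul() replaces strict_strtoul(), which was removed from the kernel; the same substitution is made in the other debugfs writers below. */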
if (0 != r) {
return -EINVAL;
}
switch (val) {
case 1:
- mali_group_enable(group);
+ mali_executor_group_enable(group);
break;
case 0:
- mali_group_disable(group);
+ mali_executor_group_disable(group);
break;
default:
return -EINVAL;
hw_core = (struct mali_hw_core *)filp->private_data;
MALI_DEBUG_ASSERT_POINTER(hw_core);
- r = snprintf(buffer, 64, "0x%08X\n", hw_core->phys_addr);
+ r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
val = MALI_HW_CORE_NO_COUNTER;
}
- if (0 == src_id) {
- mali_l2_cache_core_set_counter_src0(l2_core, (u32)val);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_core, (u32)val);
- }
+ mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
*ppos += cnt;
return cnt;
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
l2_id = 0;
l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
while (NULL != l2_cache) {
- if (0 == src_id) {
- mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val);
- }
+ mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
/* try next L2 */
l2_id++;
.write = l2_all_counter_src1_write,
};
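+
+/*
+ * Read back the current value of L2 counter src0 or src1; when no valid
+ * counter value is available, (u32)-1 is printed instead.
+ */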
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 src0 = 0;
+ u32 val0 = 0;
+ u32 src1 = 0;
+ u32 val1 = 0;
+ u32 val = -1;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+ if (0 == src_id) {
+ if (MALI_HW_CORE_NO_COUNTER != val0) {
+ val = val0;
+ }
+ } else {
+ if (MALI_HW_CORE_NO_COUNTER != val1) {
+ val = val1;
+ }
+ }
+
+ r = snprintf(buf, 64, "%u\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val1_read,
+};
+
static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
unsigned long val;
}
buf[cnt] = '\0';
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (0 != ret) {
return ret;
}
/* Update setting (not exactly thread safe) */
if (1 == val && MALI_FALSE == power_always_on_enabled) {
power_always_on_enabled = MALI_TRUE;
- _mali_osk_pm_dev_ref_add();
+ _mali_osk_pm_dev_ref_get_sync();
} else if (0 == val && MALI_TRUE == power_always_on_enabled) {
power_always_on_enabled = MALI_FALSE;
- _mali_osk_pm_dev_ref_dec();
+ _mali_osk_pm_dev_ref_put();
}
*ppos += cnt;
static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
-
- if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]))) {
- mali_pm_os_suspend();
-
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]))) {
+ if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND])-1)) {
+ mali_pm_os_suspend(MALI_TRUE);
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME])-1)) {
mali_pm_os_resume();
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]))) {
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE])-1)) {
mali_dev_pause();
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]))) {
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME])-1)) {
mali_dev_resume();
}
*ppos += cnt;
buf[cnt] = 0;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret < 0) {
return ret;
}
#endif
-static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
{
- char buf[64];
- size_t r;
- u32 mem = _mali_ukk_report_memory_usage();
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s\n"\
+ "==============================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem");
+ mali_session_memory_tracking(s);
+ return 0;
+}
- r = snprintf(buf, 64, "%u\n", mem);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, memory_debugfs_show, inode->i_private);
}
static const struct file_operations memory_usage_fops = {
.owner = THIS_MODULE,
- .read = memory_used_read,
+ .open = memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
}
buf[cnt] = '\0';
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (0 != ret) {
return ret;
}
return 0;
}
-static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
- int ret;
- char buffer[32];
- unsigned long val;
- struct mali_pmu_core *pmu;
- _mali_osk_errcode_t err;
-
- if (count >= sizeof(buffer)) {
- return -ENOMEM;
- }
-
- if (copy_from_user(&buffer[0], buf, count)) {
- return -EFAULT;
- }
- buffer[count] = '\0';
-
- ret = strict_strtoul(&buffer[0], 10, &val);
- if (0 != ret) {
- return -EINVAL;
- }
-
- pmu = mali_pmu_get_global_pmu_core();
- MALI_DEBUG_ASSERT_POINTER(pmu);
-
- err = mali_pmu_power_down(pmu, val);
- if (_MALI_OSK_ERR_OK != err) {
- return -EINVAL;
- }
-
- *offp += count;
- return count;
-}
-
-static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
- int ret;
- char buffer[32];
- unsigned long val;
- struct mali_pmu_core *pmu;
- _mali_osk_errcode_t err;
-
- if (count >= sizeof(buffer)) {
- return -ENOMEM;
- }
-
- if (copy_from_user(&buffer[0], buf, count)) {
- return -EFAULT;
- }
- buffer[count] = '\0';
-
- ret = strict_strtoul(&buffer[0], 10, &val);
- if (0 != ret) {
- return -EINVAL;
- }
-
- pmu = mali_pmu_get_global_pmu_core();
- MALI_DEBUG_ASSERT_POINTER(pmu);
-
- err = mali_pmu_power_up(pmu, val);
- if (_MALI_OSK_ERR_OK != err) {
- return -EINVAL;
- }
-
- *offp += count;
- return count;
-}
-
-static const struct file_operations pmu_power_down_fops = {
- .owner = THIS_MODULE,
- .write = pmu_power_down_write,
-};
-
-static const struct file_operations pmu_power_up_fops = {
- .owner = THIS_MODULE,
- .write = pmu_power_up_write,
-};
-
static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
{
int ret;
}
buffer[count] = '\0';
- ret = strict_strtoul(&buffer[0], 10, &val);
+ ret = kstrtoul(&buffer[0], 10, &val);
if (0 != ret) {
return -EINVAL;
}
- ret = mali_pp_scheduler_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+ ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
if (ret) {
return ret;
}
int r;
char buffer[64];
- r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
int r;
char buffer[64];
- r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_total());
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
}
buffer[count] = '\0';
- ret = strict_strtoul(&buffer[0], 10, &val);
+ ret = kstrtoul(&buffer[0], 10, &val);
if (0 != ret) {
return -EINVAL;
}
switch (val) {
case 1:
- mali_pp_scheduler_core_scaling_enable();
+ mali_executor_core_scaling_enable();
break;
case 0:
- mali_pp_scheduler_core_scaling_disable();
+ mali_executor_core_scaling_disable();
break;
default:
return -EINVAL;
static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
{
- return simple_read_from_buffer(buf, count, offp, mali_pp_scheduler_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+ return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
}
static const struct file_operations pp_core_scaling_enabled_fops = {
.owner = THIS_MODULE,
seq_printf(s, "timeline system info: \n=================\n\n");
- mali_session_lock();
- MALI_SESSION_FOREACH(session, tmp, link){
- seq_printf(s, "session %d <%p> start:\n", session_seq,session);
- mali_timeline_debug_print_system(session->timeline_system,s);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+ mali_timeline_debug_print_system(session->timeline_system, s);
seq_printf(s, "session %d end\n\n\n", session_seq++);
}
mali_session_unlock();
return 0;
}
-static int timeline_debugfs_open( struct inode *inode, struct file *file)
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, timeline_debugfs_show, inode->i_private);
}
} else {
if (NULL != mali_debugfs_dir) {
/* Debugfs directory created successfully; create files now */
- struct dentry *mali_pmu_dir;
struct dentry *mali_power_dir;
struct dentry *mali_gp_dir;
struct dentry *mali_pp_dir;
debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
- mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
- if (NULL != mali_pmu_dir) {
- debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
- debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
- }
-
mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
if (mali_power_dir != NULL) {
debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
if (NULL != mali_l2_l2x_dir) {
debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+ debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+ debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
}
}
}
- debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+ debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
}
- max_sub_jobs = mali_pp_scheduler_get_num_cores_total();
+ max_sub_jobs = mali_executor_get_num_cores_total();
for (i = 0; i < max_sub_jobs; i++) {
char buf[16];
struct dentry *mali_profiling_pp_x_dir;
#include "mali_osk_mali.h"
#include "mali_kernel_linux.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
#include "mali_kernel_descriptor_mapping.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
+
/* session->memory_lock must be held when calling this function */
static void mali_mem_release(mali_mem_allocation *descriptor)
{
case MALI_MEM_BLOCK:
mali_mem_block_release(descriptor);
break;
+ default:
+ MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", descriptor->type));
+ break;
}
}
/* Put on descriptor map */
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
_mali_osk_mutex_wait(session->memory_lock);
- mali_mem_os_release(descriptor);
+ if (MALI_MEM_OS == descriptor->type) {
+ mali_mem_os_release(descriptor);
+ } else if (MALI_MEM_BLOCK == descriptor->type) {
+ mali_mem_block_release(descriptor);
+ }
_mali_osk_mutex_signal(session->memory_lock);
return -EFAULT;
}
/* Umap and flush L2 */
mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, descriptor->size);
- mali_scheduler_zap_all_active(session);
+ mali_executor_zap_all_active(session);
}
u32 _mali_ukk_report_memory_usage(void)
return sum;
}
+u32 _mali_ukk_report_total_memory_size(void)
+{
+ return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
/**
* Per-session memory descriptor mapping table sizes
*/
/* sg must be page aligned. */
MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
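+ /* Mali page tables hold 32-bit physical addresses, so the mapped page must lie below 4 GB. */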
+ MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
_mali_osk_errcode_t err;
int i;
int ret = 0;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
- _mali_osk_mutex_wait(job->session->memory_lock);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
- for (i = 0; i < job->num_memory_cookies; i++) {
- int cookie = job->memory_cookies[i];
+ mali_session_memory_lock(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 cookie = mali_pp_job_get_memory_cookie(job, i);
if (0 == cookie) {
/* 0 is not a valid cookie */
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
MALI_DEBUG_ASSERT(0 < cookie);
- err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
- cookie, (void **)&descriptor);
+ err = mali_descriptor_mapping_get(
+ mali_pp_job_get_session(job)->descriptor_mapping,
+ cookie, (void **)&descriptor);
if (_MALI_OSK_ERR_OK != err) {
MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
ret = -EFAULT;
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
if (MALI_MEM_DMA_BUF != descriptor->type) {
/* Not a DMA-buf */
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
mem = descriptor->dma_buf.attachment;
MALI_DEBUG_ASSERT_POINTER(mem);
- MALI_DEBUG_ASSERT(mem->session == job->session);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
if (0 != err) {
MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\b",
cookie, descriptor->mali_mapping.addr));
ret = -EFAULT;
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
/* Add mem to list of DMA-bufs mapped for this job */
- job->dma_bufs[i] = mem;
+ mali_pp_job_set_dma_buf(job, i, mem);
}
- _mali_osk_mutex_signal(job->session->memory_lock);
+ mali_session_memory_unlock(session);
return ret;
}
void mali_dma_buf_unmap_job(struct mali_pp_job *job)
{
- int i;
- for (i = 0; i < job->num_dma_bufs; i++) {
- if (NULL == job->dma_bufs[i]) continue;
+ u32 i;
+ u32 num_dma_bufs = mali_pp_job_num_dma_bufs(job);
- mali_dma_buf_unmap(job->dma_bufs[i]);
- job->dma_bufs[i] = NULL;
+ for (i = 0; i < num_dma_bufs; i++) {
+ struct mali_dma_buf_attachment *mem;
+
+ mem = mali_pp_job_get_dma_buf(job, i);
+ if (NULL != mem) {
+ mali_dma_buf_unmap(mem);
+ mali_pp_job_set_dma_buf(job, i, NULL);
+ }
}
}
#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
}
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
/* Map dma-buf into this session's page tables */
if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
mali_mem_descriptor_destroy(descriptor);
mali_dma_buf_release(mem);
if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
mali_mem_descriptor_destroy(descriptor);
#endif
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
/* Get descriptor mapping for memory. */
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
mali_mem_descriptor_destroy(descriptor);
/* Return stuff to user space */
if (0 != put_user(md, &user_arg->cookie)) {
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
mali_descriptor_mapping_free(session->descriptor_mapping, md);
MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %ld\n", args.cookie));
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, (u32)args.cookie);
ret = -EINVAL;
}
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
/* Return the error that _mali_ukk_map_external_ump_mem produced */
return ret;
extern "C" {
#endif
+#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_memory.h"
#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
flags |= GFP_HIGHUSER;
#else
+ /* Kernels after 3.15.0 use ZONE_DMA instead of ZONE_DMA32 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
flags |= GFP_DMA32;
+#else
+ flags |= GFP_DMA;
+#endif
#endif
new_page = alloc_page(flags);
err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
if (0 != err) goto mali_map_failed;
- _mali_osk_mutex_signal(session->memory_lock);
-
err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
if (0 != err) goto cpu_map_failed;
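+ /* Keep session->memory_lock held until both the Mali and CPU mappings are in place. */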
+ _mali_osk_mutex_signal(session->memory_lock);
return descriptor;
cpu_map_failed:
static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+ return mali_mem_os_allocator.pool_count;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
mali_mem_os_free_page(page);
}
- /* Release some pages from page table page pool */
- mali_mem_os_trim_page_table_page_pool();
-
if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
/* Pools are empty, stop timer */
MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
}
- return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
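+ /* Shrinkers before kernel 3.12 must return the remaining object count;
+ * from 3.12 on, scan_objects returns the number of objects freed. */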
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ return mali_mem_os_shrink_count(shrinker, sc);
+#else
+ return nr;
+#endif
}
static void mali_mem_os_trim_pool(struct work_struct *data)
unregister_shrinker(&mali_mem_os_allocator.shrinker);
cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
- destroy_workqueue(mali_mem_os_allocator.wq);
+
+ if (NULL != mali_mem_os_allocator.wq) {
+ destroy_workqueue(mali_mem_os_allocator.wq);
+ mali_mem_os_allocator.wq = NULL;
+ }
spin_lock(&mali_mem_os_allocator.pool_lock);
list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
MALI_MEM_DMA_BUF,
MALI_MEM_UMP,
MALI_MEM_BLOCK,
+ MALI_MEM_TYPE_MAX,
} mali_mem_type;
typedef struct mali_mem_os_mem {
return atomic_inc_return((atomic_t *)&atom->u.val);
}
-_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
{
- MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT_POINTER(atom);
atomic_set((atomic_t *)&atom->u.val, val);
- return _MALI_OSK_ERR_OK;
}
u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
break;
- case _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL:
- return "_MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL";
+ case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+ return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
break;
- case _MALI_OSK_LOCK_ORDER_GROUP:
- return "_MALI_OSK_LOCK_ORDER_GROUP";
+ case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+ return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+ break;
+ case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+ return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
break;
case _MALI_OSK_LOCK_ORDER_SCHEDULER:
return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
break;
- case _MALI_OSK_LOCK_ORDER_PM_CORE_STATE:
- return "_MALI_OSK_LOCK_ORDER_PM_CORE_STATE";
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
break;
- case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
- return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+ case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
break;
case _MALI_OSK_LOCK_ORDER_PROFILING:
return "_MALI_OSK_LOCK_ORDER_PROFILING";
break;
- case _MALI_OSK_LOCK_ORDER_L2_COUNTER:
- return "_MALI_OSK_LOCK_ORDER_L2_COUNTER";
+ case _MALI_OSK_LOCK_ORDER_L2:
+ return "_MALI_OSK_LOCK_ORDER_L2";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
break;
case _MALI_OSK_LOCK_ORDER_UTILIZATION:
return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
break;
- case _MALI_OSK_LOCK_ORDER_PM_EXECUTE:
- return "_MALI_OSK_LOCK_ORDER_PM_EXECUTE";
- break;
case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
break;
+ case _MALI_OSK_LOCK_ORDER_PM_STATE:
+ return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+ break;
default:
- return "";
+ return "<UNKNOWN_LOCK_ORDER>";
}
}
#endif /* LOCK_ORDER_CHECKING */
wmb();
}
-mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description)
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
{
return (mali_io_address)ioremap_nocache(phys, size);
}
-void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address virt)
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
{
iounmap((void *)virt);
}
-_mali_osk_errcode_t inline _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description)
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
{
#if MALI_LICENSE_IS_GPL
return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
#endif
}
-void inline _mali_osk_mem_unreqregion(u32 phys, u32 size)
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
{
#if !MALI_LICENSE_IS_GPL
release_mem_region(phys, size);
#include <asm/uaccess.h>
#include <linux/platform_device.h>
#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "mali_osk_mali.h"
#include "mali_kernel_common.h" /* MALI_xxx macros */
#include "mali_osk.h" /* kernel side OS functions */
#include "mali_kernel_linux.h"
+
+#ifdef CONFIG_MALI_DT
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * The maximum number of resources we could have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * The maximum number of resources with interrupts; these are the
+ * first 20 elements of the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * PP core start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_L2_LOCATION_END 22
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+{.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+{.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+{.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+{.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+{.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+{.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+{.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+{.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+{.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+{.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+{.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+{.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+{.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+{.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+{.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+{.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+{.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+{.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
+{.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+{.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+{.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+{.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+{.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+{.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+ mali_bool mali_is_450 = MALI_FALSE;
+ int i, pp_core_num = 0, l2_core_num = 0;
+ struct resource *res;
+
+ for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+ res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+ if (res) {
+ mali_osk_resource_bank[i].irq = res->start;
+ if (0 == strncmp("Mali_PP_Broadcast", mali_osk_resource_bank[i].description,
+ strlen(mali_osk_resource_bank[i].description))) {
+ mali_is_450 = MALI_TRUE;
+ }
+ } else {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+ if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+ pp_core_num++;
+ }
+ }
+
+ /* We have to divide by 2, because each PP is counted twice (pp_core and pp_mmu_core). */
+ if (0 != pp_core_num % 2) {
+ MALI_DEBUG_PRINT(2, ("The value of pp core number isn't normal."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_core_num /= 2;
+
+ /**
+ * We can calculate the number of L2 cache cores from the number of
+ * PP cores and the device type (Mali-400/Mali-450).
+ */
+ if (mali_is_450 && 4 < pp_core_num) {
+ l2_core_num = 3;
+ } else if (mali_is_450 && 4 >= pp_core_num) {
+ l2_core_num = 2;
+ } else {
+ l2_core_num = 1;
+ }
+
+ for (i = MALI_OSK_RESOURCE_L2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+
+ /* If the device is not a Mali-450, remove the Mali-450-only resources from the resource bank. */
+ if (!mali_is_450) {
+ for (i = MALI_OSK_RESOURCE_L2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+
+ if (NULL == mali_platform_device) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ /* Traverse all resources in the resource bank to find the matching one. */
+ for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ if (mali_osk_resource_bank[i].base == addr) {
+ if (NULL != res) {
+ res->base = addr + _mali_osk_resource_base_address();
+ res->description = mali_osk_resource_bank[i].description;
+ res->irq = mali_osk_resource_bank[i].irq;
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ struct resource *reg_res = NULL;
+ uintptr_t ret = 0;
+
+ reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+
+ if (NULL != reg_res) {
+ ret = reg_res->start;
+ }
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (!of_get_property(node, "pmu_domain_config", &length)) {
+ return;
+ }
+
+ if (array_size != length/sizeof(u32)) {
+ MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+ return;
+ }
+
+ of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+ domain_config_array[i] = (u16)u;
+ i++;
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ u32 switch_delay;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+ return switch_delay;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_MALI_DT */
+
_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
{
int i;
+ uintptr_t phys_addr;
if (NULL == mali_platform_device) {
/* Not connected to a device */
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
+ phys_addr = addr + _mali_osk_resource_base_address();
for (i = 0; i < mali_platform_device->num_resources; i++) {
if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
- mali_platform_device->resource[i].start == addr) {
+ mali_platform_device->resource[i].start == phys_addr) {
if (NULL != res) {
- res->base = addr;
+ res->base = phys_addr;
res->description = mali_platform_device->resource[i].name;
/* Any (optional) IRQ resource belonging to this resource will follow */
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
-u32 _mali_osk_resource_base_address(void)
+uintptr_t _mali_osk_resource_base_address(void)
{
- u32 lowest_addr = 0xFFFFFFFF;
- u32 ret = 0;
+ uintptr_t lowest_addr = (uintptr_t)-1;
+ uintptr_t ret = 0;
if (NULL != mali_platform_device) {
int i;
return ret;
}
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Copy the custom customer power domain config */
+ _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_device_data data = { 0, };
+
+ err = _mali_osk_device_data_get(&data);
+
+ if (_MALI_OSK_ERR_OK == err) {
+ return data.pmu_switch_delay;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
{
MALI_DEBUG_ASSERT_POINTER(data);
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
+u32 _mali_osk_l2_resource_count(void)
+{
+ u32 l2_core_num = 0;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE0, NULL))
+ l2_core_num++;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+ l2_core_num++;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE2, NULL))
+ l2_core_num++;
+
+ MALI_DEBUG_ASSERT(0 < l2_core_num);
+
+ return l2_core_num;
+}
+
mali_bool _mali_osk_shared_interrupts(void)
{
u32 irqs[128];
char buf[512];
va_start(args, fmt);
- vscnprintf(buf,512,fmt,args);
- seq_printf(print_ctx,buf);
+ vscnprintf(buf, 512, fmt, args);
+ seq_printf(print_ctx, buf);
va_end(args);
}
return (u32)current->tgid;
}
+char *_mali_osk_get_comm(void)
+{
+ return (char *)current->comm;
+}
+
+
u32 _mali_osk_get_tid(void)
{
/* pid is actually identifying the thread on Linux */
#include "mali_kernel_common.h"
#include "mali_kernel_linux.h"
-static _mali_osk_atomic_t mali_pm_ref_count;
-
-void _mali_osk_pm_dev_enable(void)
-{
- _mali_osk_atomic_init(&mali_pm_ref_count, 0);
-}
-
-void _mali_osk_pm_dev_disable(void)
-{
- _mali_osk_atomic_term(&mali_pm_ref_count);
-}
-
/* Can NOT run in atomic context */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
{
#ifdef CONFIG_PM_RUNTIME
int err;
MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
return _MALI_OSK_ERR_FAULT;
}
- _mali_osk_atomic_inc(&mali_pm_ref_count);
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
return _MALI_OSK_ERR_OK;
}
/* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
{
#ifdef CONFIG_PM_RUNTIME
+ int err;
MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
- _mali_osk_atomic_dec(&mali_pm_ref_count);
+ err = pm_runtime_get(&(mali_platform_device->dev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
pm_runtime_mark_last_busy(&(mali_platform_device->dev));
- pm_runtime_put_autosuspend(&(mali_platform_device->dev));
-#else
- pm_runtime_put(&(mali_platform_device->dev));
#endif
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
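+ /* pm_runtime_get() reports -EINPROGRESS when an asynchronous resume
+ * is already pending; treat that as success. */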
+ if (0 > err && -EINPROGRESS != err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
#endif
+ return _MALI_OSK_ERR_OK;
}
-/* Can run in atomic context */
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
-{
-#ifdef CONFIG_PM_RUNTIME
- u32 ref;
- MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
- pm_runtime_get_noresume(&(mali_platform_device->dev));
- ref = _mali_osk_atomic_read(&mali_pm_ref_count);
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
- return ref > 0 ? MALI_TRUE : MALI_FALSE;
-#else
- return MALI_TRUE;
-#endif
-}
/* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+void _mali_osk_pm_dev_ref_put(void)
{
#ifdef CONFIG_PM_RUNTIME
MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
pm_runtime_put(&(mali_platform_device->dev));
#endif
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
#include "mali_linux_trace.h"
#include "mali_gp.h"
#include "mali_pp.h"
-#include "mali_pp_scheduler.h"
#include "mali_l2_cache.h"
#include "mali_user_settings_db.h"
+#include "mali_executor.h"
_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
{
if (NULL != l2_cache_core) {
u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
- if (0 == counter_src) {
- mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id);
- }
+ mali_l2_cache_core_set_counter_src(l2_cache_core,
+ counter_src, event_id);
}
} else {
return 0; /* Failure, unknown event */
*/
u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
{
- struct mali_l2_cache_core *l2_cache;
u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
u32 i;
- u32 ret = 0;
MALI_DEBUG_ASSERT(l2_cores_num <= 3);
for (i = 0; i < l2_cores_num; i++) {
- l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+ struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
if (NULL == l2_cache) {
continue;
}
- if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache)) {
- /* It is now safe to access the L2 cache core in order to retrieve the counters */
- mali_l2_cache_core_get_counter_values(l2_cache,
- &values->cores[i].source0,
- &values->cores[i].value0,
- &values->cores[i].source1,
- &values->cores[i].value1);
- } else {
- /* The core was not available, set the right bit in the mask. */
- ret |= (1 << i);
- }
- mali_l2_cache_unlock_power_state(l2_cache);
+ mali_l2_cache_core_get_counter_values(l2_cache,
+ &values->cores[i].source0,
+ &values->cores[i].value0,
+ &values->cores[i].source1,
+ &values->cores[i].value1);
}
- return ret;
+ return 0;
}
/**
values->mali_version_major = mali_kernel_core_get_gpu_major_version();
values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
- values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
+ values->num_of_fp_cores = mali_executor_get_num_cores_total();
values->num_of_vp_cores = 1;
}
#include <asm/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/hardirq.h>
+
#include "mali_osk_types.h"
#include "mali_kernel_linux.h"
typedef u32 mali_dma_addr;
-
-MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
-{
- return dma_pool_create("mali-dma", &mali_platform_device->dev,
- (size_t)size, (size_t)alignment, (size_t)boundary);
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
-{
- dma_pool_destroy(pool);
-}
-
-MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, mali_dma_addr *phys_addr)
-{
- void *ret;
- dma_addr_t phys;
-
- ret = dma_pool_alloc(pool, GFP_KERNEL, &phys);
-#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
- /* Verify that the "physical" address is 32-bit and
- * usable for Mali, when on a system with bus addresses
- * wider than 32-bit. */
- BUG_ON(0 != (phys >> 32));
-#endif
- *phys_addr = phys;
-
- return ret;
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, mali_dma_addr phys_addr)
-{
- dma_pool_free(pool, virt_addr, (dma_addr_t)phys_addr);
-}
-
-
#if MALI_ENABLE_CPU_CYCLES
/* Reads out the clock cycle performance counter of the current cpu.
It is useful for cost-free (2 cycle) measuring of the time spent
#include <linux/time.h>
#include <asm/delay.h>
-int _mali_osk_time_after(u32 ticka, u32 tickb)
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
{
- return time_after((unsigned long)ticka, (unsigned long)tickb);
+ return time_after_eq(ticka, tickb) ?
+ MALI_TRUE : MALI_FALSE;
}
-u32 _mali_osk_time_mstoticks(u32 ms)
+unsigned long _mali_osk_time_mstoticks(u32 ms)
{
return msecs_to_jiffies(ms);
}
-u32 _mali_osk_time_tickstoms(u32 ticks)
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
{
return jiffies_to_msecs(ticks);
}
-u32 _mali_osk_time_tickcount(void)
+unsigned long _mali_osk_time_tickcount(void)
{
return jiffies;
}
getnstimeofday(&tsval);
return (u64)timespec_to_ns(&tsval);
}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+ struct timespec tsval;
+ get_monotonic_boottime(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
return t;
}
-void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
{
MALI_DEBUG_ASSERT_POINTER(tim);
tim->timer.expires = jiffies + ticks_to_expire;
add_timer(&(tim->timer));
}
-void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
{
MALI_DEBUG_ASSERT_POINTER(tim);
mod_timer(&(tim->timer), jiffies + ticks_to_expire);
* @file mali_pmu_power_up_down.c
*/
-#include <linux/version.h>
-#include <linux/sched.h>
#include <linux/module.h>
-#include "mali_osk.h"
-#include "mali_kernel_common.h"
-#include "mali_pmu.h"
-#include "mali_pp_scheduler.h"
-#include "linux/mali/mali_utgard.h"
-
-/* Mali PMU power up/down APIs */
-
-int mali_pmu_powerup(void)
-{
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- if (NULL == pmu) {
- return -ENXIO;
- }
-
- if (_MALI_OSK_ERR_OK != mali_pmu_power_up_all(pmu)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerup);
-
-int mali_pmu_powerdown(void)
-{
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- if (NULL == pmu) {
- return -ENXIO;
- }
-
- if (_MALI_OSK_ERR_OK != mali_pmu_power_down_all(pmu)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerdown);
+#include "mali_executor.h"
int mali_perf_set_num_pp_cores(unsigned int num_cores)
{
- return mali_pp_scheduler_set_perf_level(num_cores, MALI_FALSE);
+ return mali_executor_set_perf_level(num_cores, MALI_FALSE);
}
EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
struct mali_sync_pt {
struct sync_pt sync_pt;
struct mali_sync_flag *flag;
+ struct sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */
};
/**
struct kref refcount; /**< Reference count. */
};
+/**
+ * A Mali sync timeline connects a mali timeline to a sync_timeline.
+ * When a fence times out, this allows more detailed mali timeline
+ * system info to be printed.
+ */
+struct mali_sync_timeline_container {
+ struct sync_timeline sync_timeline;
+ struct mali_timeline *timeline;
+};
+
MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
{
return container_of(pt, struct mali_sync_pt, sync_pt);
}
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+{
+ return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
static struct sync_pt *timeline_dup(struct sync_pt *pt)
{
struct mali_sync_pt *mpt, *new_mpt;
struct sync_pt *new_pt;
- MALI_DEBUG_ASSERT_POINTER(pt);
-
if (NULL == pt) {
dump_stack();
return NULL;
}
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
mpt = to_mali_sync_pt(pt);
- new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+ new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
if (NULL == new_pt) return NULL;
new_mpt = to_mali_sync_pt(new_pt);
mali_sync_flag_get(mpt->flag);
new_mpt->flag = mpt->flag;
+ new_mpt->sync_tl = mpt->sync_tl;
return new_pt;
}
module_put(THIS_MODULE);
}
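+/* Kernel 3.17 replaced the sync_timeline_ops print_pt hook with
+ * pt_value_str/timeline_value_str, hence the version split below. */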
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
{
struct mali_sync_pt *mpt;
}
}
+#else
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(str);
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ /* This sync point may still be under construction, so make sure
+ * the flag is valid before accessing it.
+ */
+ if (mpt->flag) {
+ _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+ } else {
+ _mali_osk_snprintf(str, size, "uninitialized");
+ }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(str);
+
+ mali_sync_tl = to_mali_sync_tl_container(timeline);
+
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ if (NULL != mali_sync_tl->timeline) {
+ _mali_osk_snprintf(str, size, "oldest (%u) next (%u)\n", mali_sync_tl->timeline->point_oldest,
+ mali_sync_tl->timeline->point_next);
+ }
+}
+#endif
+
static struct sync_timeline_ops mali_timeline_ops = {
.driver_name = "Mali",
.dup = timeline_dup,
.compare = timeline_compare,
.free_pt = timeline_free_pt,
.release_obj = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
.print_pt = timeline_print_pt,
+#else
+ .pt_value_str = timeline_pt_value_str,
+ .timeline_value_str = timeline_value_str,
+#endif
};
-struct sync_timeline *mali_sync_timeline_create(const char *name)
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
{
struct sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
- sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct sync_timeline), name);
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
if (NULL == sync_tl) return NULL;
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
/* Grab a reference on the module to ensure the callbacks are present
* as long some timeline exists. The reference is released when the
* timeline is freed.
mpt = to_mali_sync_pt(pt);
mpt->flag = flag;
+ mpt->sync_tl = flag->sync_tl;
return pt;
}
#include "mali_osk.h"
struct mali_sync_flag;
+struct mali_timeline;
/**
* Create a sync timeline.
* @param name Name of the sync timeline.
* @return The new sync timeline if successful, NULL if not.
*/
-struct sync_timeline *mali_sync_timeline_create(const char *name);
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
/**
* Check if sync timeline belongs to Mali.
static int mali_os_thaw(struct device *device)
{
int ret = 0;
+ struct mali_pmu_core *pmu;
MALI_DEBUG_PRINT(4, ("mali_os_thaw() called\n"));
enable_clock();
- mali_pmu_powerup();
-
+ pmu = mali_pmu_get_global_pmu_core();
+ mali_pmu_power_up_all(pmu);
if (NULL != device->driver &&
NULL != device->driver->pm &&
NULL != device->driver->pm->thaw)
{
/* for mali platform data. */
struct mali_gpu_device_data* pdev = ptr_plt_dev->dev.platform_data;
- pdev->utilization_interval = 1000,
- pdev->utilization_callback = mali_gpu_utilization_callback,
/* for resource data. */
ptr_plt_dev->num_resources = ARRAY_SIZE(meson_mali_resources);
void vh264_4k2k_register_module_callback(void(*enter_func)(void), void(*remove_func)(void));
#endif /* CONFIG_AM_VDEC_H264_4K2K */
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
int mali_meson_init_start(struct platform_device* ptr_plt_dev)
{
- struct mali_gpu_device_data* pdev = ptr_plt_dev->dev.platform_data;
-
/* for mali platform data. */
- pdev->utilization_interval = 300,
pdev->utilization_callback = mali_gpu_utilization_callback,
/* for resource data. */
void vh264_4k2k_register_module_callback(void(*enter_func)(void), void(*remove_func)(void));
#endif /* CONFIG_AM_VDEC_H264_4K2K */
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
int mali_meson_init_start(struct platform_device* ptr_plt_dev)
{
- struct mali_gpu_device_data* pdev = ptr_plt_dev->dev.platform_data;
-
/* chip mark detect. */
-
#ifdef IS_MESON_M8_CPU
if(IS_MESON_M8_CPU) {
mali_plat_data.have_switch = 0;
}
#endif
- /* for mali platform data. */
- pdev->utilization_interval = 300,
- pdev->utilization_callback = mali_gpu_utilization_callback,
-
/* for resource data. */
ptr_plt_dev->num_resources = ARRAY_SIZE(mali_gpu_resources);
ptr_plt_dev->resource = mali_gpu_resources;
return ret;
}
#endif
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+
int mali_meson_init_start(struct platform_device* ptr_plt_dev)
{
- struct mali_gpu_device_data* pdev = ptr_plt_dev->dev.platform_data;
-
- /* for mali platform data. */
- pdev->utilization_interval = 200,
- pdev->utilization_callback = mali_gpu_utilization_callback,
-
/* for resource data. */
ptr_plt_dev->num_resources = ARRAY_SIZE(mali_gpu_resources);
ptr_plt_dev->resource = mali_gpu_resources;
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mali/mali_utgard.h>
-#include "mali_kernel_common.h"
-#include "common/mali_osk_profiling.h"
-#include "common/mali_kernel_utilization.h"
-#include "common/mali_pp_scheduler.h"
+#include <mali_kernel_common.h>
+#include <mali_osk_profiling.h>
#include <meson_main.h>
module_param(mali_core_timeout, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(mali_core_timeout, "times of failed to reset GP");
-static struct mali_gpu_device_data mali_gpu_data =
-{
- .shared_mem_size = 1024 * 1024 * 1024,
- .max_job_runtime = 60000, /* 60 seconds */
- .pmu_switch_delay = 0xFFFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
- .pmu_domain_config = {0x1, 0x2, 0x4, 0x4, 0x4, 0x8, 0x8, 0x8, 0x8, 0x1, 0x2, 0x8},
-};
-
-static void mali_platform_device_release(struct device *device);
-static struct platform_device mali_gpu_device =
-{
- .name = MALI_GPU_NAME_UTGARD,
- .id = 0,
- .dev.release = mali_platform_device_release,
- .dev.coherent_dma_mask = DMA_BIT_MASK(32),
- .dev.platform_data = &mali_gpu_data,
- .dev.type = &mali_pm_device, /* We should probably use the pm_domain instead of type on newer kernels */
-};
-
int mali_pdev_pre_init(struct platform_device* ptr_plt_dev)
{
MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
- if (mali_gpu_data.shared_mem_size < 10) {
- MALI_DEBUG_PRINT(2, ("mali os memory didn't configered, set to default(512M)\n"));
- mali_gpu_data.shared_mem_size = 1024 * 1024 *1024;
- }
return mali_meson_init_start(ptr_plt_dev);
}
mali_meson_init_finish(pdev);
}
-int mali_pdev_dts_init(struct platform_device* mali_gpu_device)
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+static struct mali_gpu_device_data mali_gpu_data =
{
- struct device_node *cfg_node = mali_gpu_device->dev.of_node;
- struct device_node *child;
- u32 prop_value;
- int err;
-
- for_each_child_of_node(cfg_node, child) {
- err = of_property_read_u32(child, "shared_memory", &prop_value);
- if (err == 0) {
- MALI_DEBUG_PRINT(2, ("shared_memory configurate %d\n", prop_value));
- mali_gpu_data.shared_mem_size = prop_value * 1024 * 1024;
- }
- }
+ .shared_mem_size = 1024 * 1024 * 1024,
+ .max_job_runtime = 60000, /* 60 seconds */
+ .pmu_switch_delay = 0xFFFF, /* does not have to be this high on FPGA, but a delay is good for testing */
- err = mali_pdev_pre_init(mali_gpu_device);
- if (err == 0)
- mali_pdev_post_init(mali_gpu_device);
- return err;
-}
+ /* The following is for DVFS or utilization. */
+ .control_interval = 300, /* 300 ms */
+ .get_clock_info = NULL,
+ .get_freq = NULL,
+ .set_freq = NULL,
+ .utilization_callback = mali_gpu_utilization_callback,
+};
+
+#ifndef CONFIG_MALI_DT
+static void mali_platform_device_release(struct device *device);
+static struct platform_device mali_gpu_device =
+{
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .dev.release = mali_platform_device_release,
+ .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask,
+ .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+ .dev.platform_data = &mali_gpu_data,
+ .dev.type = &mali_pm_device, /* We should probably use the pm_domain instead of type on newer kernels */
+};
int mali_platform_device_register(void)
{
MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
}
+#else /* CONFIG_MALI_DT */
+
+int mali_platform_device_init(struct platform_device *device)
+{
+ int err;
+
+ err = mali_pdev_pre_init(device);
+ if (err < 0) goto exit;
+
+ err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+ if (err < 0) goto exit;
+
+ mali_pdev_post_init(device);
+exit:
+ return err;
+}
+
+int mali_platform_device_deinit(struct platform_device *device)
+{
+ MALI_IGNORE(device);
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+ mali_core_scaling_term();
+
+ return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
int mali_meson_init_start(struct platform_device* ptr_plt_dev);
int mali_meson_init_finish(struct platform_device* ptr_plt_dev);
+void mali_post_init(void);
int mali_meson_uninit(struct platform_device* ptr_plt_dev);
int mali_light_suspend(struct device *device);
int mali_light_resume(struct device *device);
The result will be a mali.ko file, which can be loaded into the Linux kernel
by using the insmod command.
+Use of UMP is not recommended. The dma-buf API in the Linux kernel has
+replaced UMP. The Mali Device Driver will be built with dma-buf support if
+dma-buf is enabled in the kernel config.
+
The kernel needs to be provided with a platform_device struct for the Mali GPU
device. See the mali_utgard.h header file for how to set up the Mali GPU
resources.
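
For reference, below is a minimal sketch of such a setup for a single-core
Mali-400 (MP1). The base address (0xD00C0000) and the IRQ numbers (18, 19,
20, 21) are placeholders that must be replaced with the actual SoC values;
only MALI_GPU_NAME_UTGARD, struct mali_gpu_device_data and the
MALI_GPU_RESOURCES_MALI400_MP1 helper macro come from mali_utgard.h.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mali/mali_utgard.h>

static struct resource mali_gpu_resources[] = {
	/* base address, GP IRQ, GP MMU IRQ, PP0 IRQ, PP0 MMU IRQ */
	MALI_GPU_RESOURCES_MALI400_MP1(0xD00C0000, 18, 19, 20, 21)
};

static struct mali_gpu_device_data mali_gpu_data = {
	.shared_mem_size = 256 * 1024 * 1024, /* OS memory available to Mali */
	.max_job_runtime = 60000, /* job watchdog timeout, in ms */
};

static struct platform_device mali_gpu_device = {
	.name = MALI_GPU_NAME_UTGARD,
	.id = 0,
	.num_resources = ARRAY_SIZE(mali_gpu_resources),
	.resource = mali_gpu_resources,
	.dev.coherent_dma_mask = DMA_BIT_MASK(32),
	.dev.platform_data = &mali_gpu_data,
};

static int __init example_mali_platform_init(void)
{
	return platform_device_register(&mali_gpu_device);
}

On a CONFIG_MALI_DT build the resources come from the device tree instead,
so only the platform data needs to be supplied (see
mali_platform_device_init(), which attaches it with
platform_device_add_data()).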
MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
{
- return _mali_osk_time_get_ns();
+ return _mali_osk_boot_time_get_ns();
}
#endif /* __MALI_TIMESTAMP_H__ */