#include "meson_main.h"
+#define FCLK_MPLL2 (2 << 9) /* select MPLL2 as the Mali clock source */
static DEFINE_SPINLOCK(lock);
+static u32 mali_extr_backup = 0;
+
+#if MESON_CPU_TYPE > MESON_CPU_TYPE_MESON8
+#define HAVE_MALI_CLOCK_SWITCH 1
+#endif
+
int mali_clock_init(u32 def_clk_idx)
{
+#ifdef HAVE_MALI_CLOCK_SWITCH
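+ /* HHI_MALI_CLK_CNTL carries two clock-setting slots: the low half-word
+  * (enable bit 8) and the same fields shifted up by 16 (enable bit 24).
+  * Program both slots with the default mux/divider and enable them. */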
+ writel((mali_dvfs_clk[def_clk_idx] << 16) | mali_dvfs_clk[def_clk_idx], (u32*)P_HHI_MALI_CLK_CNTL);
+ setbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 24);
+ setbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 8);
+#else
mali_clock_set(def_clk_idx);
+#endif
+#if MESON_CPU_TYPE > MESON_CPU_TYPE_MESON8
+ mali_extr_backup = mali_dvfs_clk[get_mali_tbl_size() - 1];
+#endif
return 0;
}
static int critical_clock_set(size_t param)
{
unsigned int idx = param;
+#ifdef HAVE_MALI_CLOCK_SWITCH
+ u32 clk_value;
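+ /* Park the GPU on the second clock slot (bit 31) while slot 0 is
+  * reprogrammed, then switch back, so the clock is never gated. */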
+ setbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 31);
+ clk_value = readl((u32 *)P_HHI_MALI_CLK_CNTL) & 0xffff0000;
+ clk_value = clk_value | mali_dvfs_clk[idx] | (1 << 8);
+ writel(clk_value, (u32*)P_HHI_MALI_CLK_CNTL);
+ clrbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 31);
+#else
clrbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 8);
clrbits_le32((u32)P_HHI_MALI_CLK_CNTL, (0x7F | (0x7 << 9)));
writel(mali_dvfs_clk[idx], (u32*)P_HHI_MALI_CLK_CNTL); /* set clock to 333MHZ.*/
setbits_le32((u32)P_HHI_MALI_CLK_CNTL, 1 << 8);
+#endif
return 0;
}
{
return mali_dvfs_clk_sample[idx];
}
+
+void set_str_src(u32 data)
+{
+#if MESON_CPU_TYPE > MESON_CPU_TYPE_MESON8
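+ /* data == 0 restores the backed-up top DVFS entry; data == 11 switches the
+  * top entry to MPLL2 with a preset MPLL_CNTL9 value; data > 11 switches to
+  * MPLL2 using the raw value supplied by the user. */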
+ if (data == 11) {
+ writel(0x0004d000, (u32*)P_HHI_MPLL_CNTL9);
+ } else if (data > 11) {
+ writel(data, (u32*)P_HHI_MPLL_CNTL9);
+ }
+
+ if (data == 0) {
+ mali_dvfs_clk[get_mali_tbl_size() - 1] = mali_extr_backup;
+ } else if (data > 10) {
+ mali_dvfs_clk[get_mali_tbl_size() - 1] = FCLK_MPLL2;
+ }
+#endif
+}
void disable_clock(void);
void enable_clock(void);
u32 get_mali_freq(u32 idx);
+void set_str_src(u32 data);
#endif /* _MALI_CLOCK_H_ */
637, /* 637.5 Mhz */
};
+u32 get_mali_tbl_size(void)
+{
+ return sizeof(mali_dvfs_clk) / sizeof(u32);
+}
+
int get_mali_freq_level(int freq)
{
int i = 0, level = -1;
*/
u32 mali_clock_turbo_index = 4;
-u32 mali_default_clock_idx = 3;
+u32 mali_default_clock_idx = 1;
u32 mali_up_clock_idx = 3;
/* fclk is 2550Mhz. */
#define FCLK_DEV7 (4 << 9) /* 364.3 Mhz */
u32 mali_dvfs_clk[] = {
- FCLK_DEV7 | 1, /* 182.1 Mhz */
- FCLK_DEV4 | 1, /* 318.7 Mhz */
+ FCLK_DEV5 | 1, /* 255 Mhz */
+ FCLK_DEV7 | 0, /* 364 Mhz */
FCLK_DEV3 | 1, /* 425 Mhz */
FCLK_DEV5 | 0, /* 510 Mhz */
FCLK_DEV4 | 0, /* 637.5 Mhz */
};
u32 mali_dvfs_clk_sample[] = {
- 182, /* 182.1 Mhz */
- 319, /* 318.7 Mhz */
+ 255, /* 255 Mhz */
+ 364, /* 364.3 Mhz */
425, /* 425 Mhz */
510, /* 510 Mhz */
637, /* 637.5 Mhz */
};
+u32 get_mali_tbl_size(void)
+{
+ return sizeof(mali_dvfs_clk) / sizeof(u32);
+}
+
#define MALI_PP_NUMBER 2
static struct resource mali_gpu_resources[] =
void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
int mali_meson_init_start(struct platform_device* ptr_plt_dev)
{
-
struct mali_gpu_device_data* pdev = ptr_plt_dev->dev.platform_data;
/* for mali platform data. */
- pdev->utilization_interval = 500,
+ pdev->utilization_interval = 200,
pdev->utilization_callback = mali_gpu_utilization_callback,
/* for resource data. */
return 0;
}
-static int mali_cri_pmu_on_off(size_t param)
+int mali_light_suspend(struct device *device)
{
+ int ret = 0;
struct mali_pmu_core *pmu;
- MALI_DEBUG_PRINT(4, ("mali_os_suspend() called\n"));
pmu = mali_pmu_get_global_pmu_core();
- if (param == 0)
- mali_pmu_power_down_all(pmu);
- else
- mali_pmu_power_up_all(pmu);
- return 0;
-}
-
-int mali_light_suspend(struct device *device)
-{
- int ret = 0;
#ifdef CONFIG_MALI400_PROFILING
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
MALI_PROFILING_EVENT_CHANNEL_GPU |
}
/* clock scaling. Kasin..*/
- mali_clock_critical(mali_cri_pmu_on_off, 0);
+ mali_pmu_power_down_all(pmu);
//disable_clock();
return ret;
}
int mali_light_resume(struct device *device)
{
int ret = 0;
- /* clock scaling. Kasin..*/
- //enable_clock();
+ struct mali_pmu_core *pmu;
- mali_clock_critical(mali_cri_pmu_on_off, 1);
+ pmu = mali_pmu_get_global_pmu_core();
+ mali_pmu_power_up_all(pmu);
#ifdef CONFIG_MALI400_PROFILING
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
MALI_PROFILING_EVENT_CHANNEL_GPU |
int mali_deep_suspend(struct device *device)
{
int ret = 0;
+ struct mali_pmu_core *pmu;
+
+ pmu = mali_pmu_get_global_pmu_core();
enable_clock();
flush_scaling_job();
if (NULL != device->driver &&
}
/* clock scaling off. Kasin... */
- mali_clock_critical(mali_cri_pmu_on_off, 0);
+ mali_pmu_power_down_all(pmu);
disable_clock();
return ret;
}
int mali_deep_resume(struct device *device)
{
int ret = 0;
- /* clock scaling up. Kasin.. */
+ struct mali_pmu_core *pmu;
+
+ pmu = mali_pmu_get_global_pmu_core();
enable_clock();
- mali_clock_critical(mali_cri_pmu_on_off, 1);
+ mali_pmu_power_up_all(pmu);
if (NULL != device->driver &&
NULL != device->driver->pm &&
NULL != device->driver->pm->resume)
ret = device->driver->pm->resume(device);
}
return ret;
-
}
#define MALI_TABLE_SIZE 6
+#define LOG_MALI_SCALING 0
+#define LOG_SCALING_CHANGE 0
+#if LOG_SCALING_CHANGE
+# define TRACE_STAY() MALI_DEBUG_PRINT(2, ("[SCALING]stay_count:%d\n", stay_count))
+#else
+# define TRACE_STAY()
+#endif
+
static int num_cores_total;
static int num_cores_enabled;
static int currentStep;
MALI_SCALING_MODE_MAX
};
-
static int scaling_mode = MALI_PP_FS_SCALING;
enum enum_threshold_t {
230, /* 90% */
};
-
static u32 mali_dvfs_table_size = MALI_TABLE_SIZE;
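+/* Per-step scaling parameters: keep_count holds a step for that many
+ * utilization samples before a down-scale or PP-core drop; the last two
+ * columns are the down/up GPU utilization thresholds (230 ~= 90%). */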
static struct mali_dvfs_threshold_table mali_dvfs_threshold[MALI_TABLE_SIZE]={
- { 0, 0, 2, 0 , 200}, /* for 182.1 */
- { 1, 1, 2, 152, 205}, /* for 318.7 */
- { 2, 2, 2, 180, 212}, /* for 425.0 */
- { 3, 3, 2, 205, 236}, /* for 510.0 */
- { 4, 4, 2, 230, 256}, /* for 637.5 */
- { 0, 0, 2, 0, 0}
+ { 0, 0, 5, 0, 180}, /* for 255 */
+ { 1, 1, 5, 152, 205}, /* for 364 */
+ { 2, 2, 5, 180, 212}, /* for 425 */
+ { 3, 3, 5, 205, 236}, /* for 510 */
+ { 4, 4, 5, 230, 256}, /* for 637 */
+ { 0, 0, 5, 0, 0}
};
u32 set_mali_dvfs_tbl_size(u32 size)
schedule_work(&wq_work);
}
-void trace_utilization(struct mali_gpu_utilization_data *data)
+#if LOG_MALI_SCALING
+void trace_utilization(struct mali_gpu_utilization_data *data, u32 current_idx,
+ u32 next, u32 count)
{
char direction;
- if (currentStep > lastStep)
+ if (next > current_idx)
direction = '>';
- else if ((currentStep > min_mali_clock) && (currentStep < lastStep))
+ else if ((current_idx > min_mali_clock) && (next < current_idx))
direction = '<';
else
direction = '~';
-
- MALI_DEBUG_PRINT(2, ("%c [%d-->%d]@%d{%d - %d}. pp:%d\n",
- direction,
- lastStep,
- currentStep,
- data->utilization_gpu,
- mali_dvfs_threshold[lastStep].downthreshold,
- mali_dvfs_threshold[lastStep].upthreshold,
- num_cores_enabled));
+
+ if (count == 0) {
+ MALI_DEBUG_PRINT(2, ("[SCALING]%c (%1d-->%1d)@%d{%3d - %3d}. pp:%d\n",
+ direction,
+ current_idx,
+ next,
+ data->utilization_gpu,
+ mali_dvfs_threshold[current_idx].downthreshold,
+ mali_dvfs_threshold[current_idx].upthreshold,
+ num_cores_enabled));
+ }
}
+#endif
static void mali_decide_next_status(struct mali_gpu_utilization_data *data, int* next_fs_idx,
int* pp_change_flag)
ld_left = data->utilization_pp * num_cores_enabled;
ld_right = (mali_dvfs_threshold[currentStep].upthreshold) * (num_cores_enabled - 1);
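+ /* Drop one PP core only if the current PP load still fits under the
+  * up-threshold with one core fewer and more than min_pp_num remain. */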
- if (ld_left < ld_right)
+ if ((ld_left < ld_right) && (num_cores_enabled > min_pp_num))
*pp_change_flag = -1;
}
*next_fs_idx = decided_fs_idx;
ret = 1;
currentStep = next_idx;
stay_count = mali_dvfs_threshold[currentStep].keep_count;
- /* if (ret ) printk("__scaling__%d__\n", __LINE__); */
}
- if((next_idx < currentStep) || (pp_change_flag == -1)) {
- if (stay_count == 0) {
- stay_count = mali_dvfs_threshold[currentStep].keep_count;
- ret = 1;
+#if LOG_MALI_SCALING
+ trace_utilization(data, currentStep, next_idx, stay_count);
+#endif
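+ /* Down-scaling (lower clock step or one PP core fewer) waits until
+  * stay_count reaches zero. */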
- if (pp_change_flag == -1)
+ if ((next_idx <= currentStep) || (pp_change_flag == -1)) {
+ if (stay_count == 0) {
+ if (pp_change_flag == -1) {
ret = disable_one_core();
+ stay_count = mali_dvfs_threshold[currentStep].keep_count;
+ }
- if (next_idx < currentStep)
+ if (next_idx < currentStep) {
+ ret = 1;
currentStep = next_idx;
+ stay_count = mali_dvfs_threshold[next_idx].keep_count;
+ }
} else {
stay_count--;
}
}
- if (ret == 1) {
- trace_utilization(data);
+ TRACE_STAY();
+ if (ret == 1)
schedule_work(&wq_work);
- }
#ifdef CONFIG_MALI400_PROFILING
else
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
void mali_plat_preheat(void)
{
- //printk(" aml mali test*************\n");
- int ret;
- ret = enable_max_num_cores();
- if (ret)
+ int ret1, ret2 = 0;
+#if 0
+ printk(" aml mali test*************\n");
+#endif
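+ /* Preheat: raise the clock to at least the default step and bring all
+  * PP cores online ahead of the expected load. */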
+ if (currentStep < mali_default_clock_idx) {
+ ret2 = 1;
+ currentStep = mali_default_clock_idx;
+ }
+ ret1 = enable_max_num_cores();
+ if (ret1 || ret2)
schedule_work(&wq_work);
}
extern u32 mali_up_clock_idx;
extern u32 set_max_mali_freq(u32 idx);
extern u32 get_max_mali_freq(void);
+extern u32 get_mali_tbl_size(void);
int mali_meson_init_start(struct platform_device* ptr_plt_dev);
int mali_meson_init_finish(struct platform_device* ptr_plt_dev);
return count;
}
+
+static ssize_t read_extr_src(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "usage echo 0(restore), 1(set fix src), xxx user mode\n");
+}
+
+static ssize_t write_extr_src(struct class *class,
+ struct class_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned int val;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret != 0)
+ return -EINVAL;
+
+ set_str_src(val);
+
+ return count;
+}
#endif
__ATTR(max_freq, 0644, max_freq_read, max_freq_write),
__ATTR(min_pp, 0644, min_pp_read, min_pp_write),
__ATTR(max_pp, 0644, max_pp_read, max_pp_write),
+ __ATTR(extr_src, 0644, read_extr_src, write_extr_src),
#endif
};