extern void set_mali_qq_for_sched(u32 pp_num);
extern u32 get_mali_schel_mode(void);
extern void set_mali_schel_mode(u32 mode);
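+/* Tuning hooks for PP core count and DVFS limits, defined in the platform scaling code added by this patch. */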
+extern u32 get_max_pp_num(void);
+extern u32 set_max_pp_num(u32 num);
+extern u32 get_min_pp_num(void);
+extern u32 set_min_pp_num(u32 num);
+extern u32 get_max_mali_freq(void);
+extern u32 set_max_mali_freq(u32 idx);
+extern u32 get_min_mali_freq(void);
+extern u32 set_min_mali_freq(u32 idx);
+
+extern void enable_clock(void);
+extern void disable_clock(void);
static ssize_t pp_for_sched_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
{
.owner = THIS_MODULE,
.read = domain_stat_read,
};
+
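+/* debugfs "gate_test": writing 0 gates the Mali clock off, any other value re-enables it. */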
+static ssize_t gate_test_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ if (val == 0) {
+ printk(" gate off the mali clock.\n");
+ disable_clock();
+ } else {
+ printk(" enable the mali clock.\n");
+ enable_clock();
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations gate_test_fops = {
+ .owner = THIS_MODULE,
+ .write = gate_test_write
+};
+
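+/* debugfs "max_pp": read or set the maximum number of PP cores available to the core scaler. */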
+static ssize_t max_pp_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ r = sprintf(buffer, "%u\n", get_max_pp_num());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t max_pp_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ ret = set_max_pp_num(val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations max_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = max_pp_read,
+ .write = max_pp_write
+};
+
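+/* debugfs "min_pp": read or set the minimum number of PP cores kept enabled by the core scaler. */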
+static ssize_t min_pp_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ r = sprintf(buffer, "%u\n", get_min_pp_num());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t min_pp_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ ret = set_min_pp_num(val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations min_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = min_pp_read,
+ .write = min_pp_write
+};
+
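+/* debugfs "max_freq": read or set the highest DVFS clock index the scaler may select. */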
+static ssize_t max_freq_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ r = sprintf(buffer, "%u\n", get_max_mali_freq());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t max_freq_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ ret = set_max_mali_freq(val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations max_freq_fops = {
+ .owner = THIS_MODULE,
+ .read = max_freq_read,
+ .write = max_freq_write
+};
+
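+/* debugfs "min_freq": read or set the lowest DVFS clock index the scaler may select. */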
+static ssize_t min_freq_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ r = sprintf(buffer, "%u\n", get_min_mali_freq());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t min_freq_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer))
+ {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count))
+ {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = strict_strtoul(&buffer[0], 10, &val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ ret = set_min_mali_freq(val);
+ if (0 != ret)
+ {
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations min_freq_fops = {
+ .owner = THIS_MODULE,
+ .read = min_freq_read,
+ .write = min_freq_write
+};
+
#endif /* MESON_CPU_TYPE_MESON8 */
static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
debugfs_create_file("cur_freq", 0600, mali_misc_setting_dir, NULL, &cur_freq_fops);
debugfs_create_file("scale_mode", 0600, mali_misc_setting_dir, NULL, &scale_mode_fops);
debugfs_create_file("domain_stat", 0600, mali_misc_setting_dir, NULL, &domain_stat_fops);
+ debugfs_create_file("gate_test", 0600, mali_misc_setting_dir, NULL, &gate_test_fops);
+ debugfs_create_file("max_pp", 0600, mali_misc_setting_dir, NULL, &max_pp_fops);
+ debugfs_create_file("min_pp", 0600, mali_misc_setting_dir, NULL, &min_pp_fops);
+ debugfs_create_file("max_freq", 0600, mali_misc_setting_dir, NULL, &max_freq_fops);
+ debugfs_create_file("min_freq", 0600, mali_misc_setting_dir, NULL, &min_freq_fops);
+
}
}
#endif /* MESON_CPU_TYPE_MESON8 */
static u32 last_utilization_gp;
static u32 last_utilization_gp_pp;
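+/* Runtime-tunable scaling bounds, adjusted through the min/max debugfs nodes. */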
+unsigned int min_mali_clock = MALI_CLOCK_182;
+unsigned int max_mali_clock = MALI_CLOCK_637;
+unsigned int min_pp_num = 1;
unsigned int mali_dvfs_clk[] = {
// FCLK_DEV7 | 3, /* 91 Mhz */
static void disable_one_core(void)
{
- if (1 < num_cores_enabled)
+ if (min_pp_num < num_cores_enabled)
{
--num_cores_enabled;
schedule_work(&wq_work);
MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
}
- MALI_DEBUG_ASSERT( 1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT( min_pp_num <= num_cores_enabled);
MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
}
}
else if (0 < data->utilization_pp)
{
- #if 0
- if (num_cores_enabled == 1) {
+ if (num_cores_enabled == min_pp_num) {
if ( mali_pp_scale_threshold[MALI_PP_THRESHOLD_30]< data->utilization_pp )
currentStep = MALI_CLOCK_318;
else
} else {
disable_one_core();
}
- #else
- disable_one_core();
- #endif
}
else
{
u32 utilization = data->utilization_gpu;
if (utilization > mali_dvfs_threshold[currentStep].upthreshold) {
- #if 0
- if (utilization < mali_utilization_high && currentStep < MALI_CLOCK_INDX_MAX)
+ if (utilization < mali_utilization_high && currentStep < max_mali_clock)
currentStep ++;
else
- #endif
- currentStep = MALI_CLOCK_637;
+ currentStep = max_mali_clock;
if (data->utilization_pp > MALI_PP_THRESHOLD_90) { // 90%
enable_max_num_cores();
} else {
enable_one_core();
}
- } else if (utilization < mali_dvfs_threshold[currentStep].downthreshold && currentStep > 1) {
+ } else if (utilization < mali_dvfs_threshold[currentStep].downthreshold && currentStep > min_mali_clock) {
currentStep--;
MALI_DEBUG_PRINT(2, ("Mali clock set %d..\n",currentStep));
} else {
{
return num_cores_total;
}
+
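+/* Accessors backing the max_pp/min_pp debugfs nodes; the setters clamp the enabled core count into the new range and return -1 on out-of-range input. */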
+u32 get_max_pp_num(void)
+{
+ printk(" %d->%s \n", __LINE__, __FUNCTION__);
+ return num_cores_total;
+}
+u32 set_max_pp_num(u32 num)
+{
+ printk(" %d->%s \n", __LINE__, __FUNCTION__);
+ if (num > MALI_PP_NUMBER || num < min_pp_num )
+ return -1;
+ num_cores_total = num;
+ if (num_cores_enabled > num_cores_total) {
+ num_cores_enabled = num_cores_total;
+ schedule_work(&wq_work);
+ }
+
+ return 0;
+}
+
+u32 get_min_pp_num(void)
+{
+ return min_pp_num;
+}
+u32 set_min_pp_num(u32 num)
+{
+ if (num > num_cores_total || num == 0)
+ return -1;
+ min_pp_num = num;
+ if (num_cores_enabled < min_pp_num) {
+ num_cores_enabled = min_pp_num;
+ schedule_work(&wq_work);
+ }
+
+ return 0;
+}
+
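+/* Accessors backing the max_freq/min_freq debugfs nodes; the setters clamp currentStep into the new range and return -1 on out-of-range input. */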
+u32 get_max_mali_freq(void)
+{
+ return max_mali_clock;
+}
+u32 set_max_mali_freq(u32 idx)
+{
+ if (idx >= MALI_CLOCK_INDX_MAX || idx < min_mali_clock )
+ return -1;
+ max_mali_clock = idx;
+ if (currentStep > max_mali_clock) {
+ currentStep = max_mali_clock;
+ schedule_work(&wq_work);
+ }
+
+ return 0;
+}
+
+u32 get_min_mali_freq(void)
+{
+ return min_mali_clock;
+}
+u32 set_min_mali_freq(u32 idx)
+{
+ if (idx > max_mali_clock)
+ return -1;
+ min_mali_clock = idx;
+ if (currentStep < min_mali_clock) {
+ currentStep = min_mali_clock;
+ schedule_work(&wq_work);
+ }
+
+ return 0;
+}
+