cpumask_copy(cpus_to_visit, cpu_possible_mask);
+#ifndef CONFIG_SIMPLIFIED_ENERGY_MODEL
ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
free_cpumask_var(cpus_to_visit);
+#endif
return ret;
}
return topology_cpu_flags();
}
+/*
+ * use_simplified: compile-time constant, 1 when
+ * CONFIG_SIMPLIFIED_ENERGY_MODEL is enabled and 0 otherwise. The
+ * cpu_*_energy() helpers below test it ("if (use_simplified) return NULL;")
+ * so the EAS energy-table lookups are skipped when the simplified model
+ * is in use, while both code paths still get compile-checked.
+ */
+#ifdef CONFIG_SIMPLIFIED_ENERGY_MODEL
+#define use_simplified 1
+#else
+#define use_simplified 0
+#endif
+
static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
int max_cap_idx;
int level = cpu_energy_level[cpu].core;
+ if (use_simplified)
+ return NULL;
+
if (level < 0)
return NULL;
struct sched_group_energy *sge;
int level = cpu_energy_level[cpu].coregroup;
+ if (use_simplified)
+ return NULL;
+
if (level < 0)
return NULL;
struct sched_group_energy *sge;
int level = cpu_energy_level[cpu].cluster;
+ if (use_simplified)
+ return NULL;
+
if (level < 0)
return NULL;
extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
extern void request_kernel_prefer_perf(int grp_idx, int enable);
-
-extern void init_sched_energy_table(struct cpumask *cpus, int table_size,
- unsigned long *f_table, unsigned int *v_table,
- int max_f, int min_f);
#else
static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
struct task_struct *p) { return NULL; }
static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
static inline void request_kernel_prefer_perf(int grp_idx, int enable) { }
+#endif /* CONFIG_SCHED_EMS */
+#ifdef CONFIG_SIMPLIFIED_ENERGY_MODEL
+extern void init_sched_energy_table(struct cpumask *cpus, int table_size,
+ unsigned long *f_table, unsigned int *v_table,
+ int max_f, int min_f);
+#else
static inline void init_sched_energy_table(struct cpumask *cpus, int table_size,
unsigned long *f_table, unsigned int *v_table,
int max_f, int min_f) { }
-#endif /* CONFIG_SCHED_EMS */
+#endif
Say N if unsure.
+config SIMPLIFIED_ENERGY_MODEL
+ bool "Enable simplified energy model feature"
+ depends on SCHED_EMS
+ default n
+ help
+ This option enables support for simplified energy model. This allows
+ using a simple energy table declared per_cpu instead of using the EAS
+ energy table.
+
+ Say N if unsure.
+
config SCHED_USE_FLUID_RT
bool "Enable Fluid RT scheduler feature"
depends on SMP
return select_eco_cpu(&eenv);
}
+#ifdef CONFIG_SIMPLIFIED_ENERGY_MODEL
static void
fill_power_table(struct energy_table *table, int table_size,
unsigned long *f_table, unsigned int *v_table,
int cpu, i, mips, valid_table_size = 0;
int max_mips = 0;
unsigned long max_mips_freq = 0;
+ int last_state;
mips = per_cpu(energy_table, cpumask_any(cpus)).mips;
for_each_cpu(cpu, cpus) {
continue;
if (table->mips > max_mips) {
- int last_state = table->nr_states - 1;
-
max_mips = table->mips;
+
+ last_state = table->nr_states - 1;
max_mips_freq = table->states[last_state].frequency;
}
}
* recalculated.
*/
for_each_possible_cpu(cpu) {
+ struct sched_domain *sd;
+
table = &per_cpu(energy_table, cpu);
if (!table->states)
continue;
fill_cap_table(table, max_mips, max_mips_freq);
show_energy_table(table, cpu);
+
+ last_state = table->nr_states - 1;
+ topology_set_cpu_scale(cpu, table->states[last_state].cap);
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ update_group_capacity(sd, cpu);
+ rcu_read_unlock();
}
}
return 0;
}
pure_initcall(init_sched_energy_data);
+#endif /* CONFIG_SIMPLIFIED_ENERGY_MODEL */