From a1938e8f3d3ec4c36af07b060f97b2aafa6d2141 Mon Sep 17 00:00:00 2001
From: Daeyeong Lee
Date: Fri, 18 May 2018 10:58:30 +0900
Subject: [PATCH] sched: ems: ontime: Add API to find fit cpus for heavy task.

Change-Id: I833b0c6997c40eb239836ba54385d3acb782b9ec
Signed-off-by: Daeyeong Lee
---
 kernel/sched/ems/ontime.c | 84 ++++++++++++++++++++++++++++++++-------
 1 file changed, 69 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/ems/ontime.c b/kernel/sched/ems/ontime.c
index 5d78eeebb1b6..671d056ae9c9 100644
--- a/kernel/sched/ems/ontime.c
+++ b/kernel/sched/ems/ontime.c
@@ -58,38 +58,92 @@ struct ontime_env {
 };
 DEFINE_PER_CPU(struct ontime_env, ontime_env);
 
-static unsigned long get_up_threshold(int cpu)
+static inline struct task_struct *task_of(struct sched_entity *se)
 {
-	struct ontime_cond *curr;
-
-	list_for_each_entry(curr, &cond_list, list) {
-		if (cpumask_test_cpu(cpu, &curr->cpus))
-			return curr->up_threshold;
-	}
+	return container_of(se, struct task_struct, se);
+}
 
-	return ULONG_MAX;
+static inline struct sched_entity *se_of(struct sched_avg *sa)
+{
+	return container_of(sa, struct sched_entity, avg);
 }
 
-static unsigned long get_down_threshold(int cpu)
+struct ontime_cond *get_current_cond(int cpu)
 {
 	struct ontime_cond *curr;
 
 	list_for_each_entry(curr, &cond_list, list) {
 		if (cpumask_test_cpu(cpu, &curr->cpus))
-			return curr->down_threshold;
+			return curr;
 	}
 
-	return 0;
+	return NULL;
 }
 
-static inline struct task_struct *task_of(struct sched_entity *se)
+static unsigned long get_up_threshold(int cpu)
 {
-	return container_of(se, struct task_struct, se);
+	struct ontime_cond *curr = get_current_cond(cpu);
+
+	if (curr)
+		return curr->up_threshold;
+	else
+		return ULONG_MAX;
 }
 
-static inline struct sched_entity *se_of(struct sched_avg *sa)
+static unsigned long get_down_threshold(int cpu)
 {
-	return container_of(sa, struct sched_entity, avg);
+	struct ontime_cond *curr = get_current_cond(cpu);
+
+	if (curr)
+		return curr->down_threshold;
+	else
+		return 0;
 }
 
+static void
+ontime_select_fit_cpus(struct task_struct *p, struct cpumask *fit_cpus)
+{
+	struct ontime_cond *curr;
+	struct cpumask cpus;
+	int cpu = task_cpu(p);
+
+	cpumask_and(fit_cpus, cpu_coregroup_mask(cpu), tsk_cpus_allowed(p));
+
+	curr = get_current_cond(cpu);
+	if (!curr)
+		return;
+
+	if (ontime_load_avg(p) >= curr->up_threshold) {
+		/*
+		 * 1. If task's load is bigger than up threshold,
+		 * find fit_cpus among next coregroup.
+		 */
+		list_for_each_entry_from(curr, &cond_list, list) {
+			cpumask_and(&cpus, &curr->cpus, tsk_cpus_allowed(p));
+			if (cpumask_empty(&cpus))
+				break;
+
+			cpumask_copy(fit_cpus, &cpus);
+
+			if (ontime_load_avg(p) < curr->up_threshold)
+				break;
+		}
+	} else if (ontime_load_avg(p) < curr->down_threshold) {
+		/*
+		 * 2. If task's load is smaller than down threshold,
+		 * find fit_cpus among prev coregroup.
+		 */
+		list_for_each_entry_from_reverse(curr, &cond_list, list) {
+			cpumask_and(&cpus, &curr->cpus, tsk_cpus_allowed(p));
+			if (cpumask_empty(&cpus))
+				break;
+
+			cpumask_copy(fit_cpus, &cpus);
+
+			if (ontime_load_avg(p) >= curr->down_threshold)
+				break;
+		}
+	}
+}
+
 static int
-- 
2.20.1
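
A minimal usage sketch of the new API (not part of the patch): the caller
below would have to live in ontime.c next to ontime_select_fit_cpus(),
since that function is static. The helper name ontime_pick_fit_cpu() and
its idle-first selection policy are illustrative assumptions; only
ontime_select_fit_cpus(), task_cpu(), idle_cpu() and the cpumask helpers
are existing API.

/*
 * Illustrative sketch only: pick a target CPU from the fit_cpus mask
 * computed by ontime_select_fit_cpus(). The helper name and the
 * idle-first policy are assumptions, not part of the patch above.
 */
static int ontime_pick_fit_cpu(struct task_struct *p)
{
	struct cpumask fit_cpus;
	int cpu;

	/* fit_cpus = allowed CPUs of the coregroup matching p's ontime load */
	ontime_select_fit_cpus(p, &fit_cpus);

	/* Defensive: if affinity left nothing usable, stay where we are. */
	if (cpumask_empty(&fit_cpus))
		return task_cpu(p);

	/* Prefer an idle CPU among the fit candidates. */
	for_each_cpu(cpu, &fit_cpus)
		if (idle_cpu(cpu))
			return cpu;

	return cpumask_first(&fit_cpus);
}

Note the design of the two selection loops in ontime_select_fit_cpus():
each iteration copies the candidate coregroup into fit_cpus before testing
the threshold, and breaks as soon as the intersection with the task's
affinity becomes empty. A heavy task whose affinity excludes the faster
coregroups therefore settles on the biggest coregroup it is still allowed
to run on, rather than getting no result at all.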