From: Park Bumgyu
Date: Fri, 6 Apr 2018 06:02:28 +0000 (+0900)
Subject: sched: ems: move active balance to ems
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=b94c2bcae564e85f8d3985c2771996049ddefa48;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

sched: ems: move active balance to ems

Move the active-balance-related code from ehmp.c to core.c of ems.
ehmp.c will be removed.

Change-Id: I6a01db308b3c889f01da642fa3b96df93dc6432f
Signed-off-by: Park Bumgyu
---
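Note (illustration only, not part of the patch itself): the two thresholds
carried over by this move are easy to sanity-check in isolation. Below is a
minimal stand-alone user-space mirror of task_fits() and of the src/dst
capacity comparison in exynos_need_active_balance(). The helper name
src_much_smaller() and all capacity/utilization numbers are made up for
illustration, and imbalance_pct = 117 is only a typical MC-level value, not
one taken from this patch.

#include <stdio.h>

/* Mirror of task_fits() below: 1248/1024 ~= 1.22, so a task fits a CPU
 * only while its utilization stays below roughly 82% of that CPU's
 * capacity, i.e. the CPU must offer ~22% headroom. */
static int task_fits(unsigned long task_util, unsigned long capacity)
{
	return capacity * 1024 > task_util * 1248;
}

/* Mirror of the src/dst comparison: the source CPU must trail the
 * destination by the imbalance_pct margin before a pull is considered. */
static int src_much_smaller(unsigned long src_cap, unsigned long dst_cap,
			    unsigned int imbalance_pct)
{
	return src_cap * imbalance_pct < dst_cap * 100;
}

int main(void)
{
	/* Illustrative LITTLE (430) and big (1024) capacities. */
	printf("util 400 on cap 430 fits: %d\n", task_fits(400, 430));	/* 0 */
	printf("util 300 on cap 430 fits: %d\n", task_fits(300, 430));	/* 1 */
	printf("430 vs 1024 at pct 117:   %d\n",
	       src_much_smaller(430, 1024, 117));			/* 1 */
	return 0;
}

Compiled with any C compiler, the printed values match the inline comments.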
diff --git a/kernel/sched/ems/core.c b/kernel/sched/ems/core.c
index 166530ffbd64..16c0533580fb 100644
--- a/kernel/sched/ems/core.c
+++ b/kernel/sched/ems/core.c
@@ -30,6 +30,80 @@ int cpu_util_wake(int cpu, struct task_struct *p)
 	return (util >= capacity) ? capacity : util;
 }
 
+static inline int task_fits(struct task_struct *p, long capacity)
+{
+	return capacity * 1024 > task_util(p) * 1248;
+}
+
+struct sched_group *
+exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
+{
+	struct sched_group *group = sd->groups;
+	struct sched_group *fit_group = NULL;
+	unsigned long fit_capacity = ULONG_MAX;
+
+	do {
+		int i;
+
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpumask_intersects(sched_group_span(group),
+					&p->cpus_allowed))
+			continue;
+
+		for_each_cpu(i, sched_group_span(group)) {
+			if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
+				fit_capacity = capacity_of(i);
+				fit_group = group;
+			}
+		}
+	} while (group = group->next, group != sd->groups);
+
+	return fit_group;
+}
+
+static inline int
+check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
+{
+	return ((rq->cpu_capacity * sd->imbalance_pct) <
+				(rq->cpu_capacity_orig * 100));
+}
+
+#define lb_sd_parent(sd) \
+	(sd->parent && sd->parent->groups != sd->parent->groups->next)
+
+int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
+					int src_cpu, int dst_cpu)
+{
+	unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
+	unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
+	unsigned long src_cap = capacity_of(src_cpu);
+	unsigned long dst_cap = capacity_of(dst_cpu);
+	int level = sd->level;
+
+	/* dst_cpu is idle */
+	if ((idle != CPU_NOT_IDLE) &&
+	    (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
+		if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
+		    (src_cap * sd->imbalance_pct < dst_cap * 100)) {
+			return 1;
+		}
+
+		/* This domain is top and dst_cpu is bigger than src_cpu*/
+		if (!lb_sd_parent(sd) && src_cap < dst_cap)
+			if (lbt_overutilized(src_cpu, level) || global_boosted())
+				return 1;
+	}
+
+	if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
+		cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
+		lbt_overutilized(src_cpu, level) &&
+		!lbt_overutilized(dst_cpu, level)) {
+		return 1;
+	}
+
+	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
+}
+
 static int select_proper_cpu(struct task_struct *p)
 {
 	return -1;
diff --git a/kernel/sched/ems/ehmp.c b/kernel/sched/ems/ehmp.c
index 6d91108f6aa4..17d7400125bf 100644
--- a/kernel/sched/ems/ehmp.c
+++ b/kernel/sched/ems/ehmp.c
@@ -106,78 +106,6 @@ int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
 	return estimate_state;
 }
 
-/**********************************************************************
- *                            load balance                            *
- **********************************************************************/
-#define lb_sd_parent(sd) \
-	(sd->parent && sd->parent->groups != sd->parent->groups->next)
-
-struct sched_group *
-exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
-{
-	struct sched_group *group = sd->groups;
-	struct sched_group *fit_group = NULL;
-	unsigned long fit_capacity = ULONG_MAX;
-
-	do {
-		int i;
-
-		/* Skip over this group if it has no CPUs allowed */
-		if (!cpumask_intersects(sched_group_span(group),
-					&p->cpus_allowed))
-			continue;
-
-		for_each_cpu(i, sched_group_span(group)) {
-			if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
-				fit_capacity = capacity_of(i);
-				fit_group = group;
-			}
-		}
-	} while (group = group->next, group != sd->groups);
-
-	return fit_group;
-}
-
-static inline int
-check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
-{
-	return ((rq->cpu_capacity * sd->imbalance_pct) <
-				(rq->cpu_capacity_orig * 100));
-}
-
-int exynos_need_active_balance(enum cpu_idle_type idle, struct sched_domain *sd,
-					int src_cpu, int dst_cpu)
-{
-	unsigned int src_imb_pct = lb_sd_parent(sd) ? sd->imbalance_pct : 1;
-	unsigned int dst_imb_pct = lb_sd_parent(sd) ? 100 : 1;
-	unsigned long src_cap = capacity_of(src_cpu);
-	unsigned long dst_cap = capacity_of(dst_cpu);
-	int level = sd->level;
-
-	/* dst_cpu is idle */
-	if ((idle != CPU_NOT_IDLE) &&
-	    (cpu_rq(src_cpu)->cfs.h_nr_running == 1)) {
-		if ((check_cpu_capacity(cpu_rq(src_cpu), sd)) &&
-		    (src_cap * sd->imbalance_pct < dst_cap * 100)) {
-			return 1;
-		}
-
-		/* This domain is top and dst_cpu is bigger than src_cpu*/
-		if (!lb_sd_parent(sd) && src_cap < dst_cap)
-			if (lbt_overutilized(src_cpu, level) || global_boosted())
-				return 1;
-	}
-
-	if ((src_cap * src_imb_pct < dst_cap * dst_imb_pct) &&
-		cpu_rq(src_cpu)->cfs.h_nr_running == 1 &&
-		lbt_overutilized(src_cpu, level) &&
-		!lbt_overutilized(dst_cpu, level)) {
-		return 1;
-	}
-
-	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries + 2);
-}
-
 /**********************************************************************
  *                             Global boost                           *
  **********************************************************************/
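
A similar stand-alone check for the remaining trigger: check_cpu_capacity()
in the moved code reports pressure once the capacity left for CFS tasks
(rq->cpu_capacity) falls below the CPU's original capacity
(rq->cpu_capacity_orig) scaled by 100/imbalance_pct. The mirror below uses a
hypothetical helper name and illustrative numbers; imbalance_pct = 117 is
again only an assumed, typical value.

#include <stdio.h>

/* Mirror of check_cpu_capacity(): true once cpu_capacity drops below
 * cpu_capacity_orig * 100 / imbalance_pct (~875 of 1024 at pct 117). */
static int cpu_capacity_reduced(unsigned long cpu_capacity,
				unsigned long cpu_capacity_orig,
				unsigned int imbalance_pct)
{
	return cpu_capacity * imbalance_pct < cpu_capacity_orig * 100;
}

int main(void)
{
	/* 124 of 1024 capacity units lost to other classes: within margin. */
	printf("%d\n", cpu_capacity_reduced(900, 1024, 117));	/* 0 */
	/* 324 units lost: margin exceeded, an active pull is justified. */
	printf("%d\n", cpu_capacity_reduced(700, 1024, 117));	/* 1 */
	return 0;
}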