From 7887cee7a72ab67d7462b70dc7c1ac4bdff4c400 Mon Sep 17 00:00:00 2001
From: "lakkyung.jung"
Date: Tue, 13 Mar 2018 20:35:22 +0900
Subject: [PATCH] [COMMON] sched: fair/ehmp: Consider cpu capacity when finding the idlest group.

Change-Id: I2f168aced04ade3321adcd3408ce2da344d4ab3b
Signed-off-by: lakkyung.jung
---
 include/linux/ehmp.h |  4 ++++
 kernel/sched/ehmp.c  | 51 ++++++++++++++++++++++++++++++++++++--------
 kernel/sched/fair.c  |  6 ++++++
 3 files changed, 52 insertions(+), 9 deletions(-)

diff --git a/include/linux/ehmp.h b/include/linux/ehmp.h
index b8f2a10bfba6..ac1c09185c8e 100644
--- a/include/linux/ehmp.h
+++ b/include/linux/ehmp.h
@@ -31,6 +31,8 @@ struct gb_qos_request {
 };
 
 #ifdef CONFIG_SCHED_EHMP
+extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+						struct task_struct *p);
 extern void exynos_init_entity_util_avg(struct sched_entity *se);
 extern int exynos_need_active_balance(enum cpu_idle_type idle,
 		struct sched_domain *sd, int src_cpu, int dst_cpu);
@@ -57,6 +59,8 @@ extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);
 extern void request_kernel_prefer_perf(int grp_idx, int enable);
 
 #else
+static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
+						struct task_struct *p) { return NULL; }
 static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
 static inline int exynos_need_active_balance(enum cpu_idle_type idle,
 		struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
diff --git a/kernel/sched/ehmp.c b/kernel/sched/ehmp.c
index b3cd7ecd512d..ce396851e9b9 100644
--- a/kernel/sched/ehmp.c
+++ b/kernel/sched/ehmp.c
@@ -17,6 +17,17 @@
 #include "sched.h"
 #include "tune.h"
 
+/**********************************************************************
+ * extern functions                                                   *
+ **********************************************************************/
+extern struct sched_entity *__pick_next_entity(struct sched_entity *se);
+extern unsigned long boosted_task_util(struct task_struct *task);
+extern unsigned long capacity_curr_of(int cpu);
+extern int find_best_target(struct task_struct *p, int *backup_cpu,
+				bool boosted, bool prefer_idle);
+extern u64 decay_load(u64 val, u64 n);
+extern int start_cpu(bool boosted);
+
 static unsigned long task_util(struct task_struct *p)
 {
 	return p->se.avg.util_avg;
@@ -32,6 +43,11 @@ static inline struct sched_entity *se_of(struct sched_avg *sa)
 	return container_of(sa, struct sched_entity, avg);
 }
 
+static inline int task_fits(struct task_struct *p, long capacity)
+{
+	return capacity * 1024 > boosted_task_util(p) * 1248;
+}
+
 #define entity_is_cfs_rq(se)	(se->my_q)
 #define entity_is_task(se)	(!se->my_q)
 #define LOAD_AVG_MAX		47742
@@ -178,6 +194,32 @@ bool cpu_overutilized(int cpu);
 #define lb_sd_parent(sd) \
 	(sd->parent && sd->parent->groups != sd->parent->groups->next)
 
+struct sched_group *
+exynos_fit_idlest_group(struct sched_domain *sd, struct task_struct *p)
+{
+	struct sched_group *group = sd->groups;
+	struct sched_group *fit_group = NULL;
+	unsigned long fit_capacity = ULONG_MAX;
+
+	do {
+		int i;
+
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpumask_intersects(sched_group_span(group),
+					&p->cpus_allowed))
+			continue;
+
+		for_each_cpu(i, sched_group_span(group)) {
+			if (capacity_of(i) < fit_capacity && task_fits(p, capacity_of(i))) {
+				fit_capacity = capacity_of(i);
+				fit_group = group;
+			}
+		}
+	} while (group = group->next, group != sd->groups);
+
+	return fit_group;
+}
+
 static inline int
 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 {
@@ -1092,7 +1134,6 @@ ontime_select_target_cpu(struct sched_group *sg, const struct cpumask *mask)
 
 #define TASK_TRACK_COUNT	5
 
-extern struct sched_entity *__pick_next_entity(struct sched_entity *se);
 static struct task_struct *
 ontime_pick_heavy_task(struct sched_entity *se, struct cpumask *dst_cpus,
 		int *boost_migration)
@@ -1508,8 +1549,6 @@ static void ontime_update_next_balance(int cpu, struct ontime_avg *oa)
 
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
-extern u64 decay_load(u64 val, u64 n);
-
 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
 {
 	u32 c1, c2, c3 = d3;
@@ -1608,12 +1647,6 @@ pure_initcall(init_ontime);
 /**********************************************************************
  * cpu selection                                                      *
  **********************************************************************/
-extern unsigned long boosted_task_util(struct task_struct *task);
-extern unsigned long capacity_curr_of(int cpu);
-extern int find_best_target(struct task_struct *p, int *backup_cpu,
-				bool boosted, bool prefer_idle);
-extern int start_cpu(bool boosted);
-
 #define EAS_CPU_PRV	0
 #define EAS_CPU_NXT	1
 #define EAS_CPU_BKP	2
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a05a84fd460..17e9e680e1a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6724,6 +6724,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct-100) / 100;
 
+	if (sched_feat(EXYNOS_HMP)) {
+		idlest = exynos_fit_idlest_group(sd, p);
+		if (idlest)
+			return idlest;
+	}
+
 	if (sd_flag & SD_BALANCE_WAKE)
 		load_idx = sd->wake_idx;
 
-- 
2.20.1