From 5494e2edf59c2ff74742c224eb05c5df0b3d1fe6 Mon Sep 17 00:00:00 2001
From: Morten Rasmussen <morten.rasmussen@arm.com>
Date: Tue, 7 Mar 2017 16:40:34 +0000
Subject: [PATCH] ANDROID: sched: Consider misfit tasks when load-balancing

On asymmetric cpu capacity systems, and on systems with high RT/IRQ
load, intensive tasks can end up on cpus that don't suit their compute
demand. In these scenarios, 'misfit' tasks should be migrated to cpus
with higher compute capacity to ensure better throughput.
group_misfit_task indicates this scenario, but tweaks to the
load-balance code are needed to make the migrations happen.

Misfit balancing only makes sense between a source group of lower
per-cpu capacity and a destination group of higher compute capacity.
Otherwise, misfit balancing is ignored. group_misfit_task has the
lowest priority, so any imbalance due to overload is dealt with first.

The modifications are:

1. Only pick a group containing misfit tasks as the busiest group if
   the destination group has higher capacity and has spare capacity.
2. When the busiest group is a 'misfit' group, skip the usual average
   load and group capacity checks.
3. Set the imbalance for 'misfit' balancing sufficiently high for a
   task to be pulled ignoring average load.
4. Pick the first cpu with the rq->misfit flag raised as the source
   cpu.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Change-Id: If523e28ca397f67aaddf7dc1ddc1c22488f899d1
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
---
 kernel/sched/fair.c | 47 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2677eada93c0..9cf5d479cbbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6636,6 +6636,7 @@ struct lb_env {
 	unsigned int		loop_max;
 
 	enum fbq_type		fbq_type;
+	enum group_type		src_grp_type;
 	struct list_head	tasks;
 };
 
@@ -7424,6 +7425,19 @@ group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
 		ref->sgc->min_capacity * 1024;
 }
 
+/*
+ * group_similar_cpu_capacity: Returns true if the minimum capacities of the
+ * compared groups differ by less than 12.5%.
+ */
+static inline bool
+group_similar_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+	long diff = sg->sgc->min_capacity - ref->sgc->min_capacity;
+	long max = max(sg->sgc->min_capacity, ref->sgc->min_capacity);
+
+	return abs(diff) < max >> 3;
+}
+
 static inline enum group_type
 group_classify(struct sched_group *group,
 	       struct sg_lb_stats *sgs)
@@ -7488,7 +7502,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			sgs->idle_cpus++;
 
 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
-		    !sgs->group_misfit_task && rq->misfit_task)
+		    !sgs->group_misfit_task && rq_has_misfit(rq))
 			sgs->group_misfit_task = capacity_of(i);
 	}
 
@@ -7525,6 +7539,14 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 {
 	struct sg_lb_stats *busiest = &sds->busiest_stat;
 
+	/*
+	 * Don't try to pull misfit tasks we can't help.
+	 */
+	if (sgs->group_type == group_misfit_task &&
+	    (!group_smaller_cpu_capacity(sg, sds->local) ||
+	     !group_has_capacity(env, &sds->local_stat)))
+		return false;
+
 	if (sgs->group_type > busiest->group_type)
 		return true;
 
@@ -7832,8 +7854,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * factors in sg capacity and sgs with smaller group_type are
 	 * skipped when updating the busiest sg:
	 */
-	if (busiest->avg_load <= sds->avg_load ||
-	    local->avg_load >= sds->avg_load) {
+	if (busiest->group_type != group_misfit_task &&
+	    (busiest->avg_load <= sds->avg_load ||
+	     local->avg_load >= sds->avg_load)) {
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
@@ -7867,6 +7890,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		(sds->avg_load - local->avg_load) * local->group_capacity
 	) / SCHED_CAPACITY_SCALE;
 
+	/* Boost imbalance to allow misfit task to be balanced. */
+	if (busiest->group_type == group_misfit_task) {
+		env->imbalance = max_t(long, env->imbalance,
+				       busiest->group_misfit_task);
+	}
+
 	/*
 	 * if *imbalance is less than the average load per runnable task
 	 * there is no guarantee that any tasks will be moved so we'll have
@@ -7930,6 +7959,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	    busiest->group_no_capacity)
 		goto force_balance;
 
+	/* Misfitting tasks should be dealt with regardless of the avg load */
+	if (busiest->group_type == group_misfit_task)
+		goto force_balance;
+
 	/*
 	 * If the local group is busier than the selected busiest group
 	 * don't try and pull any tasks.
@@ -7967,6 +8000,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
+	env->src_grp_type = busiest->group_type;
 	calculate_imbalance(env, &sds);
 	return sds.busiest;
 
@@ -8014,6 +8048,13 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (rt > env->fbq_type)
 			continue;
 
+		/*
+		 * For ASYM_CPUCAPACITY domains with misfit tasks we ignore
+		 * load.
+		 */
+		if (env->src_grp_type == group_misfit_task && rq_has_misfit(rq))
+			return rq;
+
 		capacity = capacity_of(i);
 		wl = weighted_cpuload(rq);
 
-- 
2.20.1
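
For anyone who wants to sanity-check the group_similar_cpu_capacity()
threshold outside the kernel, here is a minimal standalone userspace
sketch of the same arithmetic. It is illustrative only and not part of
the patch: the helper name similar_capacity() and the sample values are
made up for the demo, and the capacities follow the kernel's
SCHED_CAPACITY_SCALE convention (1024 = full capacity).

/* sketch.c: mirrors the abs(diff) < max >> 3 predicate from the patch. */
#include <stdio.h>
#include <stdlib.h>

static long max_of(long a, long b)
{
	return a > b ? a : b;
}

/* Returns 1 when two capacities differ by less than 12.5% of the larger. */
static int similar_capacity(long cap_a, long cap_b)
{
	long diff = cap_a - cap_b;
	long max = max_of(cap_a, cap_b);

	/* max >> 3 is max / 8, i.e. 12.5%, computed without a division. */
	return labs(diff) < (max >> 3);
}

int main(void)
{
	/* 1024 vs 930: |diff| = 94 < 128, so the groups count as similar. */
	printf("1024 vs 930 -> %d\n", similar_capacity(1024, 930));
	/* 1024 vs 512 (big vs LITTLE): |diff| = 512 >= 128, not similar. */
	printf("1024 vs 512 -> %d\n", similar_capacity(1024, 512));
	return 0;
}

The right shift keeps the check division-free, which is cheap on the hot
load-balance path; since shifts bind tighter than comparisons in C, the
kernel's unparenthesized "abs(diff) < max >> 3" parses the same way as
the parenthesized form above.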