From: Daeyeong Lee
Date: Mon, 12 Mar 2018 13:56:35 +0000 (+0900)
Subject: [COMMON] sched: ehmp: Modify ontime_task_wakeup to remove topology dependencies
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=888a77749515435e82865ff2194bef5f1ffee9ce;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] sched: ehmp: Modify ontime_task_wakeup to remove topology dependencies

Change-Id: Iccc9abce05adc6e0d15264eafa1bd6001c707481
Signed-off-by: Daeyeong Lee
---

diff --git a/kernel/sched/ehmp.c b/kernel/sched/ehmp.c
index a0ad5d74110f..8a96bf87adbd 100644
--- a/kernel/sched/ehmp.c
+++ b/kernel/sched/ehmp.c
@@ -1431,44 +1431,70 @@ success_unlock:
 static int ontime_task_wakeup(struct task_struct *p)
 {
-	struct sched_domain *sd;
+	struct ontime_cond *cond;
+	struct cpumask target_mask;
 	u64 delta;
 	int target_cpu = -1;
 
-	if (ontime_flag(p) & NOT_ONTIME)
-		if (ontime_load_avg(p) < get_up_threshold(task_cpu(p)))
-			return -1;
+	/*
+	 * When wakeup task satisfies ontime condition to up migration,
+	 * check there is a possible target cpu.
+	 */
+	if (ontime_load_avg(p) >= get_up_threshold(task_cpu(p))) {
+		cpumask_clear(&target_mask);
+
+		for (cond = ontime_cond; cond != NULL; cond = cond->next)
+			if (cpumask_test_cpu(task_cpu(p), &cond->src_cpus)) {
+				cpumask_copy(&target_mask, &cond->dst_cpus);
+				break;
+			}
+
+		target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
 
-	if (ontime_flag(p) & ONTIME) {
+		if (cpu_selected(target_cpu))
+			goto ontime_up;
+	}
+
+	/*
+	 * If wakeup task is not ontime and doesn't satisfy ontime condition,
+	 * it cannot be ontime task.
+	 */
+	if (ontime_flag(p) == NOT_ONTIME)
+		goto ontime_out;
+
+	if (ontime_flag(p) == ONTIME) {
+		/*
+		 * If wakeup task is ontime but doesn't keep ontime condition,
+		 * exclude this task from ontime.
+		 */
 		delta = cpu_rq(0)->clock_task - ontime_migration_time(p);
 		delta = delta >> 10;
-		if (delta > get_min_residency(task_cpu(p)) &&
-				ontime_load_avg(p) < get_down_threshold(task_cpu(p))) {
-			exclude_ontime_task(p);
-			return -1;
-		}
+		if (delta > get_min_residency(ontime_task_cpu(p)) &&
+				ontime_load_avg(p) < get_down_threshold(ontime_task_cpu(p)))
+			goto ontime_out;
 
-		if (idle_cpu(task_cpu(p)))
-			return task_cpu(p);
-	}
+		/*
+		 * If there is a possible cpu to stay ontime, task will wake up at this cpu.
+		 */
+		cpumask_copy(&target_mask, cpu_coregroup_mask(ontime_task_cpu(p)));
+		target_cpu = ontime_select_target_cpu(&target_mask, tsk_cpus_allowed(p));
 
-	/* caller must hold rcu for sched domain */
-	sd = rcu_dereference(per_cpu(sd_ea, maxcap_cpu));
-	if (!sd)
-		return -1;
+		if (cpu_selected(target_cpu))
+			goto ontime_stay;
 
-	target_cpu = ontime_select_target_cpu(sched_group_cpus(sd->groups), tsk_cpus_allowed(p));
-	if (cpu_selected(target_cpu)) {
-		if (ontime_flag(p) & NOT_ONTIME) {
-			include_ontime_task(p, target_cpu);
-		}
-	} else {
-		if (ontime_flag(p) & ONTIME)
-			exclude_ontime_task(p);
+		goto ontime_out;
 	}
 
+ontime_up:
+	include_ontime_task(p, target_cpu);
+
+ontime_stay:
 	return target_cpu;
+
+ontime_out:
+	exclude_ontime_task(p);
+	return -1;
 }
 
 static void ontime_update_next_balance(int cpu, struct ontime_avg *oa)