From 6c3c37677014a877bcb0ea72cb44408f1c0a66ef Mon Sep 17 00:00:00 2001
From: Johnlay Park
Date: Tue, 27 Feb 2018 21:24:13 +0900
Subject: [PATCH] [COMMON] sched/{fair,rt}: copy sched avg data across
 switches between classes

When switching a task between FAIR and RT, its sched avg data has to be
copied over, so that per-class PELT always works on up-to-date data.

Refer to change I7561e16ceca2635a50f57e9361d351f22f991651.

Change-Id: Id4f7c96c87c97441901b8dd8110ba2aed9ccf247
Signed-off-by: Johnlay Park
---
 kernel/sched/fair.c | 21 +++++++++++++++++++++
 kernel/sched/rt.c   |  5 +++++
 2 files changed, 26 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 72cf9ee92a70..0f71c8ef6385 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11035,6 +11035,22 @@ static void attach_task_cfs_rq(struct task_struct *p)
 		se->vruntime += cfs_rq->min_vruntime;
 }
 
+#ifdef CONFIG_SMP
+void copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio)
+{
+	if (ratio > 100)
+		ratio = 100;
+
+	to->last_update_time = from->last_update_time;
+	to->util_avg = (from->util_avg * ratio) / 100;
+	to->util_sum = (from->util_sum * ratio) / 100;
+	to->load_avg = (from->load_avg * ratio) / 100;
+	to->load_sum = (from->load_sum * ratio) / 100;
+}
+#else
+void copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio) { }
+#endif
+
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
 {
 	detach_task_cfs_rq(p);
@@ -11042,6 +11058,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+	/*
+	 * Scale the signal by the given ratio while switching:
+	 * copy the rt sched avg into the fair sched avg.
+	 */
+	copy_sched_avg(&p->rt.avg, &p->se.avg, 100);
 	attach_task_cfs_rq(p);
 
 	if (task_on_rq_queued(p)) {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 17fb5acac3cd..cc07931eb4a1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2544,6 +2544,9 @@ void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
 }
 #endif /* CONFIG_SMP */
 
+extern void
+copy_sched_avg(struct sched_avg *from, struct sched_avg *to, unsigned int ratio);
+
 /*
  * When switching a task to RT, we may overload the runqueue
  * with RT tasks. In this case we try to push them off to
@@ -2551,6 +2554,8 @@ void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
+	/* Copy fair sched avg into rt sched avg */
+	copy_sched_avg(&p->se.avg, &p->rt.avg, 100);
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
-- 
2.20.1
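
For illustration, here is a minimal userspace sketch of the percentage
scaling that copy_sched_avg() performs. The struct below is a reduced mock
with an assumed subset of fields and widths, not the kernel's full
struct sched_avg, and copy_sched_avg_mock() is a hypothetical stand-in;
only the ratio arithmetic mirrors the patch. Note the clamp: ratio is
unsigned, so a lower bound of zero is implicit and only an upper bound
against overshooting 100 percent is meaningful.

#include <stdio.h>

/* Reduced mock of struct sched_avg (assumed field subset and widths). */
struct sched_avg_mock {
	unsigned long long last_update_time;
	unsigned long load_avg;
	unsigned long long load_sum;
	unsigned long util_avg;
	unsigned long long util_sum;
};

/* Mirror of the patch's arithmetic: scale each signal by ratio percent. */
static void copy_sched_avg_mock(const struct sched_avg_mock *from,
				struct sched_avg_mock *to,
				unsigned int ratio)
{
	/* ratio is an unsigned percentage, so only an upper clamp is useful */
	if (ratio > 100)
		ratio = 100;

	to->last_update_time = from->last_update_time; /* timestamp, unscaled */
	to->util_avg = (from->util_avg * ratio) / 100;
	to->util_sum = (from->util_sum * ratio) / 100;
	to->load_avg = (from->load_avg * ratio) / 100;
	to->load_sum = (from->load_sum * ratio) / 100;
}

int main(void)
{
	/* Example numbers only; real PELT values are maintained by the kernel. */
	struct sched_avg_mock rt = { 123456789ULL, 700, 33000ULL, 512, 24000ULL };
	struct sched_avg_mock fair = { 0 };

	/* ratio == 100, as both call sites in the patch pass: a 1:1 copy. */
	copy_sched_avg_mock(&rt, &fair, 100);
	printf("ratio=100: util_avg=%lu load_avg=%lu\n",
	       fair.util_avg, fair.load_avg);

	/* A smaller ratio would hand over only part of the signal. */
	copy_sched_avg_mock(&rt, &fair, 50);
	printf("ratio=50:  util_avg=%lu load_avg=%lu\n",
	       fair.util_avg, fair.load_avg);
	return 0;
}

Note that last_update_time is carried over unscaled: it only anchors the
next PELT decay and update on the destination class, whereas the
accumulated load and utilization signals are the quantities worth scaling.
Both call sites in the patch pass a ratio of 100, i.e. an unscaled 1:1
copy of the signal between classes.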