From 8232cd93d321d773fbd6dff634f4dcf888a3e962 Mon Sep 17 00:00:00 2001
From: Johnlay Park
Date: Tue, 27 Feb 2018 21:53:22 +0900
Subject: [PATCH] [COMMON] sched/rt: fix !CONFIG_64BIT last_update_time update

Special care must be given to last_update_time on 32-bit
configurations, where a 64-bit load or store is not atomic and can
tear. Add the same special treatment that FAIR already has.

CAUTION: not actually tested on !CONFIG_64BIT.
Refer to change I5728e02a00670ad20f93b4fe9b2d0ea697a66b87.

Change-Id: I490c62b207e452f881b8acd07252aa4408b2fa83
Signed-off-by: Johnlay Park
---
 kernel/sched/fair.c  |  5 +++++
 kernel/sched/rt.c    | 51 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h | 13 +++++++++++
 3 files changed, 69 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f71c8ef6385..e2a849be4c34 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3417,6 +3417,11 @@ int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running)
 	 */
 	ret = ___update_load_avg(now, cpu, sa, 0, running, NULL, rt_rq);
 
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	rt_rq->load_last_update_time_copy = sa->last_update_time;
+#endif
+
 	return ret;
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index cc07931eb4a1..bd0217c2e983 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1573,6 +1573,57 @@ out:
 	return cpu;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+/*
+ * Called within set_task_rq() right before setting a task's cpu. The
+ * caller only guarantees p->pi_lock is held; no other assumptions,
+ * including the state of rq->lock, should be made.
+ */
+void set_task_rq_rt(struct sched_rt_entity *rt_se,
+		    struct rt_rq *prev, struct rt_rq *next)
+{
+	u64 p_last_update_time;
+	u64 n_last_update_time;
+
+	if (!sched_feat(ATTACH_AGE_LOAD))
+		return;
+	/*
+	 * We are supposed to update the task to "current" time, so that it
+	 * is up to date and ready to go to the new CPU/rt_rq. But we have
+	 * difficulty in getting what current time is, so simply throw away
+	 * the out-of-date time. This will result in the wakee task being
+	 * less decayed, but giving the wakee more load does not sound bad.
+	 */
+	if (!(rt_se->avg.last_update_time && prev))
+		return;
+#ifndef CONFIG_64BIT
+	{
+		u64 p_last_update_time_copy;
+		u64 n_last_update_time_copy;
+
+		do {
+			p_last_update_time_copy = prev->load_last_update_time_copy;
+			n_last_update_time_copy = next->load_last_update_time_copy;
+
+			smp_rmb();
+
+			p_last_update_time = prev->avg.last_update_time;
+			n_last_update_time = next->avg.last_update_time;
+
+		} while (p_last_update_time != p_last_update_time_copy ||
+			 n_last_update_time != n_last_update_time_copy);
+	}
+#else
+	p_last_update_time = prev->avg.last_update_time;
+	n_last_update_time = next->avg.last_update_time;
+#endif
+	__update_load_avg(p_last_update_time, cpu_of(rq_of_rt_rq(prev)),
+			  &rt_se->avg, 0, 0, NULL);
+
+	rt_se->avg.last_update_time = n_last_update_time;
+}
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifndef CONFIG_64BIT
 static inline u64 rt_rq_last_update_time(struct rt_rq *rt_rq)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 447abeba7f29..6e88be5d98be 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -438,6 +438,16 @@ extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+#ifdef CONFIG_SMP
+extern void set_task_rq_rt(struct sched_rt_entity *rt_se,
+			   struct rt_rq *prev, struct rt_rq *next);
+#else /* !CONFIG_SMP */
+static inline void set_task_rq_rt(struct sched_rt_entity *rt_se,
+				  struct rt_rq *prev, struct rt_rq *next) { }
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 extern void set_task_rq_fair(struct sched_entity *se,
 			     struct cfs_rq *prev, struct cfs_rq *next);
@@ -583,6 +593,9 @@ struct rt_rq {
 	struct rq *rq;
 	struct task_group *tg;
 
+#ifndef CONFIG_64BIT
+	u64 load_last_update_time_copy;
+#endif
 #endif
 };
 
-- 
2.20.1
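
A note for reviewers unfamiliar with the !CONFIG_64BIT pattern above: on
32-bit, a 64-bit last_update_time can be observed half-written, so the
writer publishes a second copy behind smp_wmb() and the reader retries
until the value and the copy agree, the same seqcount-like trick FAIR
uses. Below is a minimal userspace sketch of that protocol, not part of
the patch itself; rt_rq_sketch, writer_update() and reader_read() are
made-up names, and C11 fences stand in for the kernel's
smp_wmb()/smp_rmb().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct rt_rq_sketch {
	uint64_t last_update_time;	/* the real value */
	uint64_t last_update_time_copy;	/* published after the barrier */
};

/* Writer side: mirrors the !CONFIG_64BIT hunk in update_rt_rq_load_avg(). */
static void writer_update(struct rt_rq_sketch *rq, uint64_t now)
{
	rq->last_update_time = now;
	atomic_thread_fence(memory_order_release);	/* kernel: smp_wmb() */
	rq->last_update_time_copy = rq->last_update_time;
}

/* Reader side: mirrors the do/while retry loop in set_task_rq_rt(). */
static uint64_t reader_read(const struct rt_rq_sketch *rq)
{
	uint64_t val, copy;

	do {
		copy = rq->last_update_time_copy;
		atomic_thread_fence(memory_order_acquire);	/* kernel: smp_rmb() */
		val = rq->last_update_time;
	} while (val != copy);	/* a torn or racing read makes these differ */

	return val;
}

int main(void)
{
	struct rt_rq_sketch rq = { 0, 0 };

	writer_update(&rq, 123456789ULL);
	printf("stable read: %llu\n", (unsigned long long)reader_read(&rq));
	return 0;
}

The ordering is what matters: the copy is written after the value and
read before it, so a reader can never pair a fresh copy with a torn
value; it can only retry the loop.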
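
Also worth noting: this patch adds the definition and declaration of
set_task_rq_rt() but does not itself touch set_task_rq(); presumably
the referenced change I5728e02a00670ad20f93b4fe9b2d0ea697a66b87, or a
follow-up, wires it in the same way set_task_rq_fair() is called. A
rough sketch of the presumed call site in kernel/sched/sched.h follows;
the RT hookup line is an assumption, not part of this patch.

static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	/* Assumed hookup: age the RT load as the FAIR call above does. */
	set_task_rq_rt(&p->rt, p->rt.rt_rq, tg->rt_rq[cpu]);
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}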