From 2cc05c491ce892607f7c4ad01443b6d02fcd0789 Mon Sep 17 00:00:00 2001
From: Johnlay Park
Date: Thu, 22 Feb 2018 21:15:15 +0900
Subject: [PATCH] [COMMON] sched/rt: Introduce pelt load for rt

To activate the FRT (Fluid Real Time) scheduler, task load tracking
for RT tasks is also necessary for fluid scheduling.

Change-Id: I82a998bf9253d54c6858ab9dcd8eaa5b99ecb4a6
Signed-off-by: Johnlay Park
---
 include/linux/sched.h |  10 ++++
 kernel/sched/rt.c     | 109 ++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h  |   2 +-
 3 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8542337f11d1..259f6eddfa22 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -494,6 +494,16 @@ struct sched_rt_entity {
 	/* rq "owned" by this entity/group: */
 	struct rt_rq *my_q;
 #endif
+
+#ifdef CONFIG_SMP
+	/*
+	 * Per entity load average tracking.
+	 *
+	 * Put into separate cache line so it does not
+	 * collide with read-mostly values above.
+	 */
+	struct sched_avg	avg;// ____cacheline_aligned_in_smp;
+#endif
 } __randomize_layout;
 
 struct sched_dl_entity {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c2e40ca9e34e..4841ec697f56 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -261,6 +261,93 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+#include "sched-pelt.h"
+
+extern u64 decay_load(u64 val, u64 n);
+
+static u32 __accumulate_pelt_segments_rt(u64 periods, u32 d1, u32 d3)
+{
+	u32 c1, c2, c3 = d3;
+
+	c1 = decay_load((u64)d1, periods);
+
+	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
+
+	return c1 + c2 + c3;
+}
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+static __always_inline u32
+accumulate_sum_rt(u64 delta, int cpu, struct sched_avg *sa,
+		  unsigned long weight, int running)
+{
+	unsigned long scale_freq, scale_cpu;
+	u32 contrib = (u32)delta;
+	u64 periods;
+
+	scale_freq = arch_scale_freq_capacity(NULL, cpu);
+	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+	delta += sa->period_contrib;
+	periods = delta / 1024;
+
+	if (periods) {
+		sa->load_sum = decay_load(sa->load_sum, periods);
+		sa->util_sum = decay_load((u64)(sa->util_sum), periods);
+
+		delta %= 1024;
+		contrib = __accumulate_pelt_segments_rt(periods,
+				1024 - sa->period_contrib, delta);
+	}
+	sa->period_contrib = delta;
+
+	contrib = cap_scale(contrib, scale_freq);
+	if (weight) {
+		sa->load_sum += weight * contrib;
+	}
+	if (running)
+		sa->util_sum += contrib * scale_cpu;
+
+	return periods;
+}
+
+/*
+ * We can represent the historical contribution to runnable average as the
+ * coefficients of a geometric series, exactly like fair task load.
+ * Refer to ___update_load_avg() in the fair sched class.
+ */
+static __always_inline int
+__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
+		  unsigned long weight, int running, struct rt_rq *rt_rq)
+{
+	u64 delta;
+
+	delta = now - sa->last_update_time;
+
+	if ((s64)delta < 0) {
+		sa->last_update_time = now;
+		return 0;
+	}
+
+	delta >>= 10;
+	if (!delta)
+		return 0;
+
+	sa->last_update_time += delta << 10;
+
+	if (!weight)
+		running = 0;
+
+	if (!accumulate_sum_rt(delta, cpu, sa, weight, running))
+		return 0;
+
+	sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib);
+	sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib);
+
+	return 1;
+}
+
 static void pull_rt_task(struct rq *this_rq);
 
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
@@ -1598,6 +1685,24 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 #ifdef CONFIG_SMP
+void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+	int cpu = cpu_of(rq);
+	/*
+	 * Track the task load average to carry it to a new CPU on migration.
+	 */
+	if (rt_se->avg.last_update_time)
+		__update_load_avg(now, cpu, &rt_se->avg, scale_load_down(NICE_0_LOAD),
+			rt_rq->curr == rt_se, NULL);
+
+	update_rt_rq_load_avg(now, cpu, rt_rq, true);
+	/* TODO:
+	 * Propagate the rt entity load average;
+	 * if (rt_entity_is_task(rt_se)), trace the rt average.
+	 */
+}
 
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
@@ -2214,6 +2319,10 @@ void __init init_sched_rt_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
+#else
+void update_rt_load_avg(u64 now, struct sched_rt_entity *rt_se)
+{
+}
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5d4480705485..0aae6a73c4c0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -530,7 +530,7 @@ struct rt_rq {
 	struct plist_head pushable_tasks;
 
 	struct sched_avg avg;
-
+	struct sched_rt_entity *curr;
 #endif /* CONFIG_SMP */
 	int rt_queued;
 
-- 
2.20.1
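
An illustrative userspace sketch (not part of the patch): decay_load() and
accumulate_sum_rt() above implement the standard PELT geometric series, in
which the contribution of a 1024us period that ended n periods ago is
weighted by y^n, with y chosen so that y^32 = 0.5. The small program below
reproduces that series in floating point under those assumed parameters;
the file name pelt_demo.c is arbitrary, and the kernel itself uses the
precomputed fixed-point tables from sched-pelt.h (e.g. LOAD_AVG_MAX) rather
than libm.

/*
 * pelt_demo.c - floating-point sketch of the PELT geometric series.
 * Assumes the default half-life of 32 periods of 1024us each; this is
 * only a model of what decay_load()/accumulate_sum_rt() compute with
 * fixed-point arithmetic in the kernel.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Decay factor y chosen so that y^32 == 0.5. */
	const double y = pow(0.5, 1.0 / 32.0);
	double sum = 0.0;
	int n;

	/*
	 * An entity that was runnable in every one of the last N periods
	 * accumulates 1024 * y^n for the period that ended n periods ago.
	 * The series converges to 1024 / (1 - y); the kernel's fixed-point
	 * LOAD_AVG_MAX sits slightly below this because each step truncates.
	 */
	for (n = 0; n < 1000; n++)
		sum += 1024.0 * pow(y, n);

	printf("geometric limit  : %.1f (closed form %.1f)\n",
	       sum, 1024.0 / (1.0 - y));
	printf("half-life check  : %.3f (y^32, expect 0.500)\n", pow(y, 32));
	printf("decay over 64 p. : %.3f of original\n", pow(y, 64));

	return 0;
}

Build with something like: cc -O2 pelt_demo.c -o pelt_demo -lm. The
bounded limit is why __update_load_avg() above divides the running sums by
LOAD_AVG_MAX - 1024 + sa->period_contrib: load_avg and util_avg stay in a
fixed range regardless of how long the history is.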