From d4c37268f6a928c96b496577b1a69b3eef0983c7 Mon Sep 17 00:00:00 2001
From: "lakkyung.jung"
Date: Fri, 4 May 2018 20:16:53 +0900
Subject: [PATCH] sched: fair/ems: Add schedtune_util_est

Change-Id: I0a0f1723356683829ce709ec750f4f013aa1c75b
Signed-off-by: lakkyung.jung
---
 kernel/sched/fair.c | 17 ++++++++++++-----
 kernel/sched/tune.h |  2 ++
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fb07b27b1177..e545f01979c6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3122,7 +3122,9 @@ __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entit
 	if (___update_load_avg(now, cpu, &se->avg,
 				  se->on_rq * scale_load_down(se->load.weight),
 				  cfs_rq->curr == se, NULL, NULL)) {
-		cfs_se_util_change(&se->avg);
+		if (schedtune_util_est_en(task_of(se)))
+			cfs_se_util_change(&se->avg);
+
 		return 1;
 	}
 
@@ -3660,16 +3662,18 @@ static inline unsigned long task_util(struct task_struct *p)
 	return READ_ONCE(p->se.avg.util_avg);
 }
 
-static inline unsigned long _task_util_est(struct task_struct *p)
+inline unsigned long _task_util_est(struct task_struct *p)
 {
 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-	return max(ue.ewma, ue.enqueued);
+	return schedtune_util_est_en(p) ? max(ue.ewma, ue.enqueued)
+					: task_util(p);
 }
 
-static inline unsigned long task_util_est(struct task_struct *p)
+inline unsigned long task_util_est(struct task_struct *p)
 {
-	return max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p));
+	return schedtune_util_est_en(p) ? max(READ_ONCE(p->se.avg.util_avg), _task_util_est(p))
+					: task_util(p);
 }
 
 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
@@ -3735,6 +3739,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	if (!task_sleep)
 		return;
 
+	if (!schedtune_util_est_en(p))
+		return;
+
 	/*
 	 * If the PELT values haven't changed since enqueue time,
 	 * skip the util_est update.
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 48a357735905..135b1a49ba2b 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -20,6 +20,7 @@ int schedtune_need_group_balance(struct task_struct *p);
 
 int schedtune_prefer_idle(struct task_struct *tsk);
 int schedtune_prefer_perf(struct task_struct *tsk);
+int schedtune_util_est_en(struct task_struct *tsk);
 
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);
@@ -34,6 +35,7 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu);
 
 #define schedtune_prefer_idle(tsk) 0
 #define schedtune_prefer_perf(tsk) 0
+#define schedtune_util_est_en(tsk) 0
 
 #define schedtune_enqueue_task(task, cpu) do { } while (0)
 #define schedtune_dequeue_task(task, cpu) do { } while (0)
-- 
2.20.1
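
Note: the patch declares schedtune_util_est_en() in tune.h and gates the
util_est paths in fair.c behind it, but only fair.c and tune.h appear in
the diffstat, so the tune.c definition presumably lands in a companion
patch. A minimal sketch of what that definition might look like,
modelled on the existing schedtune_prefer_idle() helper; the util_est_en
field on struct schedtune is an assumption, while task_schedtune(),
schedtune_initialized, and the RCU protection follow the stock schedtune
code:

/*
 * Hypothetical sketch, not part of this patch: per-schedtune-group
 * util_est enable flag, returning 0 (disabled) until schedtune is up.
 * The util_est_en member of struct schedtune is assumed to be added by
 * the companion tune.c/tune.h change.
 */
int schedtune_util_est_en(struct task_struct *p)
{
	struct schedtune *st;
	int util_est_en;

	if (unlikely(!schedtune_initialized))
		return 0;

	/* Read the flag from the task's schedtune group under RCU */
	rcu_read_lock();
	st = task_schedtune(p);
	util_est_en = st->util_est_en;
	rcu_read_unlock();

	return util_est_en;
}

With this shape, groups that leave util_est_en at 0 fall back to the raw
PELT task_util() in _task_util_est()/task_util_est() and skip the
util_est update on dequeue, matching the fair.c hunks above.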