From 38c6ade2dd4dcc3bca06c981e2a1b91289046177 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
Date: Tue, 20 Oct 2015 13:04:41 +0100
Subject: [PATCH] sched/fair: Remove empty idle enter and exit functions

Commit cd126afe838d ("sched/fair: Remove rq's runnable avg") got rid
of rq->avg and so there is no need to update it any more when entering
or exiting idle.

Remove the now empty functions idle_{enter|exit}_fair().

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Yuyang Du
Link: http://lkml.kernel.org/r/1445342681-17171-1-git-send-email-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c      | 24 +-----------------------
 kernel/sched/idle_task.c |  1 -
 kernel/sched/sched.h     |  8 --------
 3 files changed, 1 insertion(+), 32 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f04fda8f669c..2779dece43b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2835,24 +2835,6 @@ void remove_entity_load_avg(struct sched_entity *se)
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
 
-/*
- * Update the rq's load with the elapsed running time before entering
- * idle. if the last scheduled task is not a CFS task, idle_enter will
- * be the only way to update the runnable statistic.
- */
-void idle_enter_fair(struct rq *this_rq)
-{
-}
-
-/*
- * Update the rq's load with the elapsed idle time before a task is
- * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
- * be the only way to update the runnable statistic.
- */
-void idle_exit_fair(struct rq *this_rq)
-{
-}
-
 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->runnable_load_avg;
@@ -7248,8 +7230,6 @@ static int idle_balance(struct rq *this_rq)
 	int pulled_task = 0;
 	u64 curr_cost = 0;
 
-	idle_enter_fair(this_rq);
-
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -7330,10 +7310,8 @@ out:
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
-	if (pulled_task) {
-		idle_exit_fair(this_rq);
+	if (pulled_task)
 		this_rq->idle_stamp = 0;
-	}
 
 	return pulled_task;
 }
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index c4ae0f1fdf9b..47ce94931f1b 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -47,7 +47,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-	idle_exit_fair(rq);
 	rq_last_tick_reset(rq);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..2eb2002aa336 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1249,16 +1249,8 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void idle_enter_fair(struct rq *this_rq);
-extern void idle_exit_fair(struct rq *this_rq);
-
 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
-#else
-
-static inline void idle_enter_fair(struct rq *rq) { }
-static inline void idle_exit_fair(struct rq *rq) { }
-
 #endif
 
 #ifdef CONFIG_CPU_IDLE
-- 
2.20.1