From 46383648b3c769fa74794ae6425ab993fc113bdb Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Sat, 15 Mar 2014 02:15:07 +0400
Subject: [PATCH] sched: Revert commit 4c6c4e38c4e9 ("sched/core: Fix endless
 loop in pick_next_task()")

This reverts commit 4c6c4e38c4e9 ("sched/core: Fix endless loop in
pick_next_task()"), which is not necessary after ("sched/rt: Substract
number of tasks of throttled queues from rq->nr_running").

Signed-off-by: Kirill Tkhai
Reviewed-by: Preeti U Murthy
[conflict resolution with stop task checking patch]
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1394835307.18748.34.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c  |  5 +----
 kernel/sched/rt.c    | 10 ++++++++++
 kernel/sched/sched.h | 12 ------------
 3 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5e157f157d85..43232b8bacde 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6732,10 +6732,7 @@ static int idle_balance(struct rq *this_rq)
 
 out:
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    ((this_rq->stop && this_rq->stop->on_rq) ||
-	     this_rq->dl.dl_nr_running ||
-	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
 	if (pulled_task) {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2add019ddbd0..7795e292f4c9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -493,6 +493,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -569,6 +574,11 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	dequeue_top_rt_rq(rt_rq);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c8d9ee418ca7..b2cbe81308af 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -425,18 +425,6 @@ struct rt_rq {
 #endif
 };
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-#else
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-#endif
-
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
-- 
2.20.1