sched/rt: Subtract the number of tasks in throttled queues from rq->nr_running
author     Kirill Tkhai <tkhai@yandex.ru>
           Fri, 14 Mar 2014 22:15:00 +0000 (02:15 +0400)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 18 Apr 2014 10:07:28 +0000 (12:07 +0200)
The top-level RT runqueue rq->rt can now be in either a dequeued or an
enqueued state. Add a new member, rt_rq->rt_queued, to indicate which;
it is used only for the top-level queue rq->rt.

The goal is to fit the generic scheme already used by the deadline and
fair classes, i.e. a throttled rt_rq's rt_nr_running is subtracted from
rq->nr_running.
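
For illustration only, a simplified sketch of the accounting the helpers
added below implement (the BUG_ON sanity checks are omitted here): the
top-level rt_rq contributes its rt_nr_running to rq->nr_running only
while it is queued, and rt_queued records whether that contribution is
currently accounted:

	/* Sketch: count the top rt_rq's tasks in rq->nr_running. */
	static void enqueue_top_rt_rq(struct rt_rq *rt_rq)
	{
		struct rq *rq = rq_of_rt_rq(rt_rq);

		if (rt_rq->rt_queued)		/* already accounted */
			return;
		if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
			return;			/* throttled or empty: do not count */

		rq->nr_running += rt_rq->rt_nr_running;
		rt_rq->rt_queued = 1;
	}

	/* Sketch: drop the contribution again, e.g. when throttled. */
	static void dequeue_top_rt_rq(struct rt_rq *rt_rq)
	{
		struct rq *rq = rq_of_rt_rq(rt_rq);

		if (!rt_rq->rt_queued)		/* nothing accounted */
			return;

		rq->nr_running -= rt_rq->rt_nr_running;
		rt_rq->rt_queued = 0;
	}

With this accounting in place, a throttled RT runqueue's tasks no longer
appear in rq->nr_running, and pick_next_task_rt() only needs to check
rt_rq->rt_queued instead of testing rt_nr_running and the throttle state
separately.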

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835300.18748.33.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/rt.c
kernel/sched/sched.h

index f6aa3cdbee840be94413fcc6d56df026224fae32..2add019ddbd0816283544c9a8093ce72680cc25d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
 #endif
+       /* We start in dequeued state, because no RT tasks are queued */
+       rt_rq->rt_queued = 0;
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
@@ -404,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
        return !list_empty(&rt_se->run_list);
@@ -465,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
        rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
-               if (rt_se && !on_rt_rq(rt_se))
+               if (!rt_se)
+                       enqueue_top_rt_rq(rt_rq);
+               else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
+
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
@@ -479,7 +487,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (rt_se && on_rt_rq(rt_se))
+       if (!rt_se)
+               dequeue_top_rt_rq(rt_rq);
+       else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
 }
 
@@ -545,12 +555,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-       if (rt_rq->rt_nr_running)
-               resched_task(rq_of_rt_rq(rt_rq)->curr);
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       if (!rt_rq->rt_nr_running)
+               return;
+
+       enqueue_top_rt_rq(rt_rq);
+       resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+       dequeue_top_rt_rq(rt_rq);
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -935,6 +951,38 @@ static void update_curr_rt(struct rq *rq)
        }
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (!rt_rq->rt_queued)
+               return;
+
+       BUG_ON(!rq->nr_running);
+
+       rq->nr_running -= rt_rq->rt_nr_running;
+       rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (rt_rq->rt_queued)
+               return;
+       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+               return;
+
+       rq->nr_running += rt_rq->rt_nr_running;
+       rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
@@ -1143,6 +1191,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
                back = rt_se;
        }
 
+       dequeue_top_rt_rq(rt_rq_of_se(back));
+
        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
@@ -1151,13 +1201,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
 
        for_each_sched_rt_entity(rt_se) {
@@ -1166,6 +1221,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1183,8 +1239,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
-
-       inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1195,8 +1249,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
        dequeue_rt_entity(rt_se);
 
        dequeue_pushable_task(rq, p);
-
-       dec_nr_running(rq);
 }
 
 /*
@@ -1401,10 +1453,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
        if (prev->sched_class == &rt_sched_class)
                update_curr_rt(rq);
 
-       if (!rt_rq->rt_nr_running)
-               return NULL;
-
-       if (rt_rq_throttled(rt_rq))
+       if (!rt_rq->rt_queued)
                return NULL;
 
        put_prev_task(rq, prev);
index 456e492a3dca37c13d7cb7b57a51965bfa18d6b3..c8d9ee418ca7186900db54ff2c7dd6cc74d4f84b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -409,6 +409,8 @@ struct rt_rq {
        int overloaded;
        struct plist_head pushable_tasks;
 #endif
+       int rt_queued;
+
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;