sched/fair: Rework throttle_count sync
author		Peter Zijlstra <peterz@infradead.org>
		Wed, 22 Jun 2016 13:14:26 +0000 (15:14 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Mon, 27 Jun 2016 10:53:19 +0000 (12:53 +0200)
Since we already take rq->lock when creating a cgroup, use it to also
synchronize the hierarchical throttle_count at that point. This lets us
drop the extra throttle_uptodate state and the lazy-sync branch in the
enqueue path.
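
For illustration, a minimal userspace sketch of the idea (not kernel
code: struct group, group_online(), throttle() and the plain mutex
standing in for rq->lock are all hypothetical). Because the one-shot
copy at creation runs under the same lock that the throttle/unthrottle
walk (tg_throttle_down()/tg_unthrottle_up()) takes, a new child can
never observe a stale parent count, so no lazy resync on enqueue is
needed:

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for rq->lock */

  struct group {
          struct group *parent;
          int throttle_count;     /* number of throttled ancestors, incl. self */
  };

  /* In the kernel, tg_throttle_down() updates the whole subtree under
   * rq->lock; adjusting a single node is enough for this sketch. */
  static void throttle(struct group *g)
  {
          pthread_mutex_lock(&rq_lock);
          g->throttle_count++;
          pthread_mutex_unlock(&rq_lock);
  }

  /* Rough analogue of online_fair_sched_group() + sync_throttle(): the
   * copy happens under the lock, so it cannot race with throttle(), and
   * the old lazy "throttle_uptodate" resync on enqueue is unnecessary. */
  static struct group *group_online(struct group *parent)
  {
          struct group *g = calloc(1, sizeof(*g));

          g->parent = parent;
          pthread_mutex_lock(&rq_lock);
          if (parent)
                  g->throttle_count = parent->throttle_count;
          pthread_mutex_unlock(&rq_lock);
          return g;
  }

  int main(void)
  {
          struct group *root = group_online(NULL);

          throttle(root);
          struct group *child = group_online(root);

          printf("child throttle_count = %d\n", child->throttle_count); /* prints 1 */
          return 0;
  }

Built with cc -pthread, the child created after the throttle reports a
count of 1, i.e. the same value the removed throttle_uptodate path used
to reconstruct on first enqueue.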

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: linux-kernel@vger.kernel.org
[ Fixed build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 62d5e7dcc7f821949fddbaf4cf1a3b6878e01270..4088eedea7637859844c777dfa56dfb23136c142 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
-       /* Synchronize hierarchical throttle counter: */
-       if (unlikely(!cfs_rq->throttle_uptodate)) {
-               struct rq *rq = rq_of(cfs_rq);
-               struct cfs_rq *pcfs_rq;
-               struct task_group *tg;
-
-               cfs_rq->throttle_uptodate = 1;
-
-               /* Get closest up-to-date node, because leaves go first: */
-               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-                       if (pcfs_rq->throttle_uptodate)
-                               break;
-               }
-               if (tg) {
-                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
-                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
-               }
-       }
-
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
                throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+       struct cfs_rq *pcfs_rq, *cfs_rq;
+
+       if (!cfs_bandwidth_used())
+               return;
+
+       if (!tg->parent)
+               return;
+
+       cfs_rq = tg->cfs_rq[cpu];
+       pcfs_rq = tg->parent->cfs_rq[cpu];
+
+       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+       cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)
 
                raw_spin_lock_irq(&rq->lock);
                post_init_entity_util_avg(se);
+               sync_throttle(tg, i);
                raw_spin_unlock_irq(&rq->lock);
        }
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 28c42b789f700f31aea16569d28343ccdd348062..f44da95c70cde1360cc8292ed8a61eed66fa0396 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -438,7 +438,7 @@ struct cfs_rq {
 
        u64 throttled_clock, throttled_clock_task;
        u64 throttled_clock_task_time;
-       int throttled, throttle_count, throttle_uptodate;
+       int throttled, throttle_count;
        struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */