sched/core: Replace sd_busy/nr_busy_cpus with sched_domain_shared
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Mon, 9 May 2016 08:38:01 +0000 (10:38 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 30 Sep 2016 08:54:07 +0000 (10:54 +0200)
Move the nr_busy_cpus counter from its hacky sd->parent->groups->sgc
location into the much more natural sched_domain_shared location.
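
For illustration only (not part of the patch): after this change the busy-CPU
count for a CPU's LLC domain is reached through the shared state directly,
rather than via sd->parent->groups->sgc->nr_busy_cpus. A minimal sketch of the
new read side, using the names introduced by the diff (locking and error
handling simplified):

	struct sched_domain_shared *sds;
	int nr_busy = 0;

	rcu_read_lock();
	/* sd_llc_shared is the per-CPU pointer set up in update_top_cache_domain() */
	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		nr_busy = atomic_read(&sds->nr_busy_cpus);
	rcu_read_unlock();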

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8a878b9649a163c50acc59ed7125c2778bc89cea..98888f1a03bc8250bf02b98c7f38b50459bc77eb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1069,6 +1069,7 @@ struct sched_group;
 
 struct sched_domain_shared {
        atomic_t        ref;
+       atomic_t        nr_busy_cpus;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97d3c18d00bf8b2a5bab48ec825d42f71e35b41f..26826eac5545f5dcfcc9235cba5040cf0888637e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5981,14 +5981,14 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
+       struct sched_domain_shared *sds = NULL;
        struct sched_domain *sd;
-       struct sched_domain *busy_sd = NULL;
        int id = cpu;
        int size = 1;
 
@@ -5996,13 +5996,13 @@ static void update_top_cache_domain(int cpu)
        if (sd) {
                id = cpumask_first(sched_domain_span(sd));
                size = cpumask_weight(sched_domain_span(sd));
-               busy_sd = sd->parent; /* sd_busy */
+               sds = sd->shared;
        }
-       rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_size, cpu) = size;
        per_cpu(sd_llc_id, cpu) = id;
+       rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
        sd = lowest_flag_domain(cpu, SD_NUMA);
        rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
@@ -6299,7 +6299,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
                return;
 
        update_group_capacity(sd, cpu);
-       atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
 /*
@@ -6546,6 +6545,7 @@ sd_init(struct sched_domain_topology_level *tl,
        if (sd->flags & SD_SHARE_PKG_RESOURCES) {
                sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
                atomic_inc(&sd->shared->ref);
+               atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
        }
 
        sd->private = sdd;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 786ef94197e0b49be055a621d2e71fcf001aa76b..15902cdb27b3d2412b0c13bf74122479749233fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8008,13 +8008,13 @@ static inline void set_cpu_sd_state_busy(void)
        int cpu = smp_processor_id();
 
        rcu_read_lock();
-       sd = rcu_dereference(per_cpu(sd_busy, cpu));
+       sd = rcu_dereference(per_cpu(sd_llc, cpu));
 
        if (!sd || !sd->nohz_idle)
                goto unlock;
        sd->nohz_idle = 0;
 
-       atomic_inc(&sd->groups->sgc->nr_busy_cpus);
+       atomic_inc(&sd->shared->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -8025,13 +8025,13 @@ void set_cpu_sd_state_idle(void)
        int cpu = smp_processor_id();
 
        rcu_read_lock();
-       sd = rcu_dereference(per_cpu(sd_busy, cpu));
+       sd = rcu_dereference(per_cpu(sd_llc, cpu));
 
        if (!sd || sd->nohz_idle)
                goto unlock;
        sd->nohz_idle = 1;
 
-       atomic_dec(&sd->groups->sgc->nr_busy_cpus);
+       atomic_dec(&sd->shared->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -8258,8 +8258,8 @@ end:
 static inline bool nohz_kick_needed(struct rq *rq)
 {
        unsigned long now = jiffies;
+       struct sched_domain_shared *sds;
        struct sched_domain *sd;
-       struct sched_group_capacity *sgc;
        int nr_busy, cpu = rq->cpu;
        bool kick = false;
 
@@ -8287,11 +8287,13 @@ static inline bool nohz_kick_needed(struct rq *rq)
                return true;
 
        rcu_read_lock();
-       sd = rcu_dereference(per_cpu(sd_busy, cpu));
-       if (sd) {
-               sgc = sd->groups->sgc;
-               nr_busy = atomic_read(&sgc->nr_busy_cpus);
-
+       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+       if (sds) {
+               /*
+                * XXX: write a coherent comment on why we do this.
+                * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
+                */
+               nr_busy = atomic_read(&sds->nr_busy_cpus);
                if (nr_busy > 1) {
                        kick = true;
                        goto unlock;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 420c05d099c3867dec0083249b1a860d76506feb..4fc6e9876d9c0470f04951fdd8f5a483ed4db1c8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -858,8 +858,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_capacity {
@@ -871,10 +871,6 @@ struct sched_group_capacity {
        unsigned int capacity;
        unsigned long next_update;
        int imbalance; /* XXX unrelated to capacity but shared group state */
-       /*
-        * Number of busy cpus in this group.
-        */
-       atomic_t nr_busy_cpus;
 
        unsigned long cpumask[0]; /* iteration mask */
 };