sched: Simplify finding the lowest sched_domain
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Thu, 7 Apr 2011 12:09:48 +0000 (14:09 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Mon, 11 Apr 2011 10:58:19 +0000 (12:58 +0200)
Instead of relying on knowing the build order and the various CONFIG_
flags, simply remember the bottom-most sched_domain when we create the
domain hierarchy.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.134511046@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
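
The gain is easiest to see in isolation: the hierarchy is assembled one
level at a time, and each per-level build helper returns the domain it
just placed at the bottom, so the value left in hand after the last
helper is the lowest domain for that CPU. Below is a minimal standalone
sketch of that pattern with toy types and hypothetical names (dom,
add_level, lowest_sd); it is not the kernel code, just an illustration
of the idea the patch applies:

#include <stdio.h>

#define NR_CPUS 4

/* Toy stand-in for struct sched_domain: levels link upward via ->parent. */
struct dom {
	const char *name;
	struct dom *parent;
};

/* Toy stand-in for the per-cpu 'd.sd' pointer array the patch adds. */
static struct dom *lowest_sd[NR_CPUS];

/*
 * Mirrors one __build_*_sched_domain() helper: if the level is
 * configured in, link it below everything built so far and return it
 * as the new bottom; otherwise pass the old bottom through unchanged.
 */
static struct dom *add_level(struct dom *bottom, struct dom *level, int enabled)
{
	if (!enabled)
		return bottom;
	level->parent = bottom;
	return level;
}

int main(void)
{
	static struct dom phys[NR_CPUS] = { {"PHYS"}, {"PHYS"}, {"PHYS"}, {"PHYS"} };
	static struct dom mc[NR_CPUS]   = { {"MC"},   {"MC"},   {"MC"},   {"MC"} };
	static struct dom smt[NR_CPUS]  = { {"SMT"},  {"SMT"},  {"SMT"},  {"SMT"} };
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		struct dom *sd = NULL;

		/* Largest level first; each call returns the new bottom. */
		sd = add_level(sd, &phys[i], 1);
		sd = add_level(sd, &mc[i],   1);
		sd = add_level(sd, &smt[i],  0); /* e.g. !CONFIG_SCHED_SMT */

		lowest_sd[i] = sd;	/* remember the bottom right here */
	}

	for (i = 0; i < NR_CPUS; i++)	/* "attach": config-independent */
		printf("cpu%d lowest: %s\n", i, lowest_sd[i]->name);

	return 0;
}

Once the bottom pointer is stored at build time, the old #ifdef chain
in the attach loop (visible in the last hunk below) collapses to a
single per-cpu load.
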
kernel/sched.c

index e66d24aaf6d1edebb9b41d3d39482cf1b82e262d..d6992bfa11eb54910d1f6489608a05bf6f174340 100644
@@ -6865,11 +6865,13 @@ struct s_data {
        cpumask_var_t           nodemask;
        cpumask_var_t           send_covered;
        cpumask_var_t           tmpmask;
+       struct sched_domain ** __percpu sd;
        struct root_domain      *rd;
 };
 
 enum s_alloc {
        sa_rootdomain,
+       sa_sd,
        sa_tmpmask,
        sa_send_covered,
        sa_nodemask,
@@ -7104,6 +7106,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
        switch (what) {
        case sa_rootdomain:
                free_rootdomain(d->rd); /* fall through */
+       case sa_sd:
+               free_percpu(d->sd); /* fall through */
        case sa_tmpmask:
                free_cpumask_var(d->tmpmask); /* fall through */
        case sa_send_covered:
@@ -7124,10 +7128,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                return sa_nodemask;
        if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
                return sa_send_covered;
+       d->sd = alloc_percpu(struct sched_domain *);
+       if (!d->sd) {
+               printk(KERN_WARNING "Cannot alloc per-cpu pointers\n");
+               return sa_tmpmask;
+       }
        d->rd = alloc_rootdomain();
        if (!d->rd) {
                printk(KERN_WARNING "Cannot alloc root domain\n");
-               return sa_tmpmask;
+               return sa_sd;
        }
        return sa_rootdomain;
 }
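
The two hunks above work as a pair: enum s_alloc names the allocation
stages from most to least complete, __visit_domain_allocation_hell()
returns the stage it reached, and __free_domain_allocs() uses a
fall-through switch so that naming a stage frees it and everything
allocated before it. Adding the per-cpu sd array therefore means
inserting one stage (sa_sd) into both ladders. A compressed standalone
sketch of the idiom, with hypothetical names (sdata, build, unwind)
rather than the kernel's:

#include <stdlib.h>

enum stage { st_rootdomain, st_sd, st_masks, st_none };

struct sdata {
	void *masks, *sd, *rd;
};

/* Tear down from 'what' on down; each case falls through. */
static void unwind(struct sdata *d, enum stage what)
{
	switch (what) {
	case st_rootdomain:
		free(d->rd);	/* fall through */
	case st_sd:
		free(d->sd);	/* fall through */
	case st_masks:
		free(d->masks);	/* fall through */
	case st_none:
		break;
	}
}

/* Returns the stage to hand to unwind() if anything went wrong. */
static enum stage build(struct sdata *d)
{
	if (!(d->masks = malloc(64)))
		return st_none;		/* nothing to undo */
	if (!(d->sd = malloc(64)))
		return st_masks;	/* undo masks only */
	if (!(d->rd = malloc(64)))
		return st_sd;		/* undo sd, then masks */
	return st_rootdomain;		/* everything allocated */
}

int main(void)
{
	struct sdata d = { NULL, NULL, NULL };
	enum stage got = build(&d);

	if (got != st_rootdomain) {
		unwind(&d, got);	/* rolls back exactly what exists */
		return 1;
	}
	/* ... use d.sd and d.rd ...; full teardown when finished: */
	unwind(&d, st_rootdomain);
	return 0;
}
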
@@ -7316,6 +7325,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
                sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 
+               *per_cpu_ptr(d.sd, i) = sd;
+
                for (tmp = sd; tmp; tmp = tmp->parent) {
                        tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
                        build_sched_groups(&d, tmp, cpu_map, i);
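
The store into *per_cpu_ptr(d.sd, i) sits immediately after the last
__build_*_sched_domain() call for CPU i: each of those helpers returns
the domain it created (or passes sd through unchanged when its level is
compiled out), so at this point sd is by construction the bottom of CPU
i's hierarchy, whatever the configuration.
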
@@ -7363,15 +7374,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
        /* Attach the domains */
        for_each_cpu(i, cpu_map) {
-#ifdef CONFIG_SCHED_SMT
-               sd = &per_cpu(cpu_domains, i).sd;
-#elif defined(CONFIG_SCHED_MC)
-               sd = &per_cpu(core_domains, i).sd;
-#elif defined(CONFIG_SCHED_BOOK)
-               sd = &per_cpu(book_domains, i).sd;
-#else
-               sd = &per_cpu(phys_domains, i).sd;
-#endif
+               sd = *per_cpu_ptr(d.sd, i);
                cpu_attach_domain(sd, d.rd, i);
        }