};
struct s_data {
-	cpumask_var_t nodemask;
	cpumask_var_t send_covered;
	struct sched_domain ** __percpu sd;
	struct sd_data sdd[SD_LV_MAX];
	sa_sd,
	sa_sd_storage,
	sa_send_covered,
-	sa_nodemask,
	sa_none,
};
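
For orientation, a minimal sketch of how the two declarations read with this hunk applied; only the members and allocation states visible in the excerpt are shown, anything else in the full definitions is elided:

struct s_data {
	cpumask_var_t send_covered;
	struct sched_domain ** __percpu sd;
	struct sd_data sdd[SD_LV_MAX];
	/* ...remaining members not shown in this excerpt... */
};

enum s_alloc {
	/* ...earlier states not shown in this excerpt... */
	sa_sd,
	sa_sd_storage,
	sa_send_covered,
	sa_none,
};
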
		} /* fall through */
	case sa_send_covered:
		free_cpumask_var(d->send_covered); /* fall through */
-	case sa_nodemask:
-		free_cpumask_var(d->nodemask); /* fall through */
	case sa_none:
		break;
	}
	memset(d, 0, sizeof(*d));
-	if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
-		return sa_none;
	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_nodemask;
+		return sa_none;
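
The value returned on failure names the last resource that was successfully set up, and the teardown switch earlier in the patch falls through from that case downward; with nodemask gone, a failed send_covered allocation now unwinds straight to sa_none. A standalone sketch of the same allocate/rollback idiom, using ordinary malloc/free and hypothetical names rather than the kernel helpers:

#include <stdlib.h>

enum rollback { got_both, got_a, got_nothing };

struct pair { void *a, *b; };

/* Returns how far allocation got; pass the result to pair_free(). */
static enum rollback pair_alloc(struct pair *p)
{
	if (!(p->a = malloc(32)))
		return got_nothing;	/* nothing to undo */
	if (!(p->b = malloc(32)))
		return got_a;		/* undo a only */
	return got_both;
}

/* Falls through so each state frees exactly what was allocated. */
static void pair_free(struct pair *p, enum rollback how_far)
{
	switch (how_far) {
	case got_both:
		free(p->b);		/* fall through */
	case got_a:
		free(p->a);		/* fall through */
	case got_nothing:
		break;
	}
}
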
	for (i = 0; i < SD_LV_MAX; i++) {
		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
		if (!d->sdd[i].sd)
	struct sched_domain *sd;
	sd = sd_init_CPU(d, i);
	set_domain_attribute(sd, attr);
-	cpumask_copy(sched_domain_span(sd), d->nodemask);
+	cpumask_and(sched_domain_span(sd),
+		    cpumask_of_node(cpu_to_node(i)), cpu_map);
	sd->parent = parent;
	if (parent)
		parent->child = sd;
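
For reference, a sketch of the whole helper with this hunk applied, assuming it takes the same parameter list as the __build_*_sched_domain() calls further down and returns the new domain; the helper's actual name and declaration are not part of the excerpt:

static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
	struct sched_domain *parent, int i)
{
	struct sched_domain *sd;
	sd = sd_init_CPU(d, i);
	set_domain_attribute(sd, attr);
	/* Node-local span computed in place instead of copied from d->nodemask. */
	cpumask_and(sched_domain_span(sd),
		    cpumask_of_node(cpu_to_node(i)), cpu_map);
	sd->parent = parent;
	if (parent)
		parent->child = sd;
	return sd;
}
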
	/* Set up domains for cpus specified by the cpu_map. */
	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
-			    cpu_map);
-
		sd = NULL;
		sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
		sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);
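
With the precomputed d.nodemask gone, the helpers compute their spans locally instead of reading a shared scratch mask, so the per-CPU loop only threads the levels together through the sd parent pointer. A minimal, self-contained illustration of that chaining pattern, using hypothetical types rather than struct sched_domain:

#include <stddef.h>

struct level {
	struct level *parent;
	struct level *child;
};

/* Hang a new level under the previous one, as the helpers above do. */
static struct level *attach(struct level *new, struct level *parent)
{
	new->parent = parent;
	new->child = NULL;
	if (parent)
		parent->child = new;
	return new;
}

/* Mirrors the loop: start with parent == NULL, feed each result back in. */
static struct level *build_chain(struct level levels[], int n)
{
	struct level *cur = NULL;
	int i;

	for (i = 0; i < n; i++)
		cur = attach(&levels[i], cur);

	return cur;	/* the lowest level, e.g. the per-CPU domain */
}
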