struct cgroup_subsys_state css;
unsigned long flags; /* "unsigned long" so bitops work */
- cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
+ cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
struct cpuset *parent; /* my parent */
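
Unlike a cpumask_t embedded in the structure, a cpumask_var_t only has backing storage once it is allocated (on CONFIG_CPUMASK_OFFSTACK builds it is a pointer). A minimal sketch of the lifecycle every converted field now has to follow, using only the helpers this patch already relies on; the snippet is illustrative, not part of the patch:

	cpumask_var_t mask;

	/* allocation can fail, so every user needs an error path */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_online_mask);	/* use like a struct cpumask * */
	/* ... operate on the mask ... */
	free_cpumask_var(mask);
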
static struct cpuset top_cpuset = {
.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
- .cpus_allowed = CPU_MASK_ALL,
- .mems_allowed = NODE_MASK_ALL,
};
/*
- * Return in *pmask the portion of a cpusets's cpus_allowed that
+ * Return in pmask the portion of a cpuset's cpus_allowed that
* are online. If none are online, walk up the cpuset hierarchy
* until we find one that does have some online cpus. If we get
* all the way to the top and still haven't found any online cpus,
-static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
+static void guarantee_online_cpus(const struct cpuset *cs,
+				  struct cpumask *pmask)
{
- while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
+ while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
cs = cs->parent;
if (cs)
- cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
+ cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
else
- *pmask = cpu_online_map;
- BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
+ cpumask_copy(pmask, cpu_online_mask);
+ BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
}
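
A hedged sketch of how a caller combines the helper with an off-stack mask; the task_cs()/set_cpus_allowed_ptr() calls follow the attach path in this file, but the snippet itself (and its simplified locking) is only for illustration:

	cpumask_var_t online;

	if (!alloc_cpumask_var(&online, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&callback_mutex);		/* locking simplified here */
	guarantee_online_cpus(task_cs(tsk), online);	/* fills 'online' */
	mutex_unlock(&callback_mutex);

	set_cpus_allowed_ptr(tsk, online);	/* guaranteed non-empty */
	free_cpumask_var(online);
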
/*
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
- return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
+ return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
nodes_subset(p->mems_allowed, q->mems_allowed) &&
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
is_mem_exclusive(p) <= is_mem_exclusive(q);
*/
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
- return kmemdup(cs, sizeof(*cs), GFP_KERNEL);
+ struct cpuset *trial;
+
+ trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
+ if (!trial)
+ return NULL;
+
+ if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
+ kfree(trial);
+ return NULL;
+ }
+ cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+
+ return trial;
}
/**
*/
static void free_trial_cpuset(struct cpuset *trial)
{
+ free_cpumask_var(trial->cpus_allowed);
kfree(trial);
}
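
The expected caller pattern for the pair above, sketched for clarity: it mirrors what update_cpumask() does later in this patch; validate_change() and the error codes are the existing ones, only the function name is invented:

static int example_update_cpus(struct cpuset *cs, const char *buf)
{
	struct cpuset *trialcs;
	int retval;

	trialcs = alloc_trial_cpuset(cs);	/* duplicates cs and its mask */
	if (!trialcs)
		return -ENOMEM;

	retval = cpulist_parse(buf, trialcs->cpus_allowed);
	if (!retval)
		retval = validate_change(cs, trialcs);

	free_trial_cpuset(trialcs);		/* frees the mask, then the struct */
	return retval;
}
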
c = cgroup_cs(cont);
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
- cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
+ cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
return -EINVAL;
if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
c != cur &&
/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
if (cgroup_task_count(cur->css.cgroup)) {
- if (cpus_empty(trial->cpus_allowed) ||
+ if (cpumask_empty(trial->cpus_allowed) ||
nodes_empty(trial->mems_allowed)) {
return -ENOSPC;
}
*/
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
- return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+ return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}
static void
cp = list_first_entry(&q, struct cpuset, stack_list);
list_del(q.next);
- if (cpus_empty(cp->cpus_allowed))
+ if (cpumask_empty(cp->cpus_allowed))
continue;
if (is_sched_load_balance(cp))
*dattr = SD_ATTR_INIT;
update_domain_attr_tree(dattr, &top_cpuset);
}
- *doms = top_cpuset.cpus_allowed;
+ cpumask_copy(doms, top_cpuset.cpus_allowed);
ndoms = 1;
goto done;
cp = list_first_entry(&q, struct cpuset, stack_list);
list_del(q.next);
- if (cpus_empty(cp->cpus_allowed))
+ if (cpumask_empty(cp->cpus_allowed))
continue;
/*
struct cpuset *b = csa[j];
if (apn == b->pn) {
- cpus_or(*dp, *dp, b->cpus_allowed);
+ cpumask_or(dp, dp, b->cpus_allowed);
if (dattr)
update_domain_attr_tree(dattr + nslot, b);
static int cpuset_test_cpumask(struct task_struct *tsk,
struct cgroup_scanner *scan)
{
- return !cpus_equal(tsk->cpus_allowed,
+ return !cpumask_equal(&tsk->cpus_allowed,
(cgroup_cs(scan->cg))->cpus_allowed);
}
static void cpuset_change_cpumask(struct task_struct *tsk,
struct cgroup_scanner *scan)
{
- set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
+ set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
}
/**
* with tasks have cpus.
*/
if (!*buf) {
- cpus_clear(trialcs->cpus_allowed);
+ cpumask_clear(trialcs->cpus_allowed);
} else {
- retval = cpulist_parse(buf, &trialcs->cpus_allowed);
+ retval = cpulist_parse(buf, trialcs->cpus_allowed);
if (retval < 0)
return retval;
- if (!cpus_subset(trialcs->cpus_allowed, cpu_online_map))
+ if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
return -EINVAL;
}
retval = validate_change(cs, trialcs);
return retval;
/* Nothing to do if the cpus didn't change */
- if (cpus_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+ if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
return 0;
retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
is_load_balanced = is_sched_load_balance(trialcs);
mutex_lock(&callback_mutex);
- cs->cpus_allowed = trialcs->cpus_allowed;
+ cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
mutex_unlock(&callback_mutex);
/*
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
fudge = 10; /* spare mmarray[] slots */
- fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
+ fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
retval = -ENOMEM;
/*
if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
- if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
+ if (!cpumask_empty(cs->cpus_allowed) &&
+ is_sched_load_balance(cs))
async_rebuild_sched_domains();
}
cs->flags = trialcs->flags;
mutex_unlock(&callback_mutex);
- if (!cpus_empty(trialcs->cpus_allowed) && balance_flag_changed)
+ if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
async_rebuild_sched_domains();
out:
struct cpuset *cs = cgroup_cs(cont);
int ret = 0;
- if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+ if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
if (tsk->flags & PF_THREAD_BOUND) {
mutex_lock(&callback_mutex);
- if (!cpus_equal(tsk->cpus_allowed, cs->cpus_allowed))
+ if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
ret = -EINVAL;
mutex_unlock(&callback_mutex);
}
int ret;
mutex_lock(&callback_mutex);
- ret = cpulist_scnprintf(page, PAGE_SIZE, &cs->cpus_allowed);
+ ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
mutex_unlock(&callback_mutex);
return ret;
parent_cs = cgroup_cs(parent);
cs->mems_allowed = parent_cs->mems_allowed;
- cs->cpus_allowed = parent_cs->cpus_allowed;
+ cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
return;
}
cs = kmalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
+ if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
+ kfree(cs);
+ return ERR_PTR(-ENOMEM);
+ }
cpuset_update_task_memory_state();
cs->flags = 0;
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
- cpus_clear(cs->cpus_allowed);
+ cpumask_clear(cs->cpus_allowed);
nodes_clear(cs->mems_allowed);
cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
number_of_cpusets--;
+ free_cpumask_var(cs->cpus_allowed);
kfree(cs);
}
int __init cpuset_init_early(void)
{
+ alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+
top_cpuset.mems_generation = cpuset_mems_generation++;
return 0;
}
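
top_cpuset is a static object, so its mask cannot come from kmalloc() this early; alloc_bootmem_cpumask_var() covers exactly that case. A sketch of the same pattern for any other static cpumask_var_t that must be usable before the slab allocator is up (the variable and setup function are hypothetical):

static cpumask_var_t early_mask;

void __init early_mask_init(void)	/* must run early in boot, like cpuset_init_early() */
{
	alloc_bootmem_cpumask_var(&early_mask);
	cpumask_copy(early_mask, cpu_possible_mask);
}
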
{
int err = 0;
- cpus_setall(top_cpuset.cpus_allowed);
+ cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
fmeter_init(&top_cpuset.fmeter);
* has online cpus, so can't be empty).
*/
parent = cs->parent;
- while (cpus_empty(parent->cpus_allowed) ||
+ while (cpumask_empty(parent->cpus_allowed) ||
nodes_empty(parent->mems_allowed))
parent = parent->parent;
}
/* Continue past cpusets with all cpus, mems online */
- if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
+ if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
continue;
/* Remove offline cpus and mems from this cpuset. */
mutex_lock(&callback_mutex);
- cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+ cpu_online_mask);
nodes_and(cp->mems_allowed, cp->mems_allowed,
node_states[N_HIGH_MEMORY]);
mutex_unlock(&callback_mutex);
/* Move tasks from the empty cpuset to a parent */
- if (cpus_empty(cp->cpus_allowed) ||
+ if (cpumask_empty(cp->cpus_allowed) ||
nodes_empty(cp->mems_allowed))
remove_tasks_in_empty_cpuset(cp);
else {
}
cgroup_lock();
- top_cpuset.cpus_allowed = cpu_online_map;
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
scan_for_empty_cpusets(&top_cpuset);
ndoms = generate_sched_domains(&doms, &attr);
cgroup_unlock();
void __init cpuset_init_smp(void)
{
- top_cpuset.cpus_allowed = cpu_online_map;
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
hotcpu_notifier(cpuset_track_online_cpus, 0);
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
* @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
*
- * Description: Returns the cpumask_t cpus_allowed of the cpuset
+ * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
* subset of cpu_online_map, even if this means going outside the
* tasks cpuset.
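
A hedged sketch of a caller, following the alloc/copy/free discipline the rest of the patch establishes (the surrounding error-returning context is assumed, not shown in the patch):

	cpumask_var_t allowed;

	if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(current, allowed);	/* non-empty, online subset */
	set_cpus_allowed_ptr(current, allowed);
	free_cpumask_var(allowed);
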