* Migrate task to the cpu pointed by pr.
*/
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return ret;
}
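For reference, a minimal sketch of the migrate/call/restore idiom this hunk converts, assuming the 2.6.25-era cpumask API used by the patch; the helper name and callback are hypothetical:

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical helper: run fn(arg) with 'current' pinned to 'cpu', then
 * restore the caller's original affinity.  set_cpus_allowed_ptr() takes a
 * pointer so no cpumask_t (up to 512 bytes with NR_CPUS=4096) is passed
 * by value, which is the point of this conversion. */
static int run_pinned(int cpu, int (*fn)(void *), void *arg)
{
	cpumask_t saved_mask = current->cpus_allowed;	/* save old affinity */
	int ret;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	ret = fn(arg);
	set_cpus_allowed_ptr(current, &saved_mask);	/* restore */
	return ret;
}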
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
continue;
}
t_state.cpu = i;
- set_cpus_allowed(current, cpumask_of_cpu(i));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
}
/* restore the previous state */
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return ret;
}
/* SMI requires CPU 0 */
old_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(0));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
if (smp_processor_id() != 0) {
dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
__FUNCTION__);
out:
- set_cpus_allowed(current, old_mask);
+ set_cpus_allowed_ptr(current, &old_mask);
return ret;
}
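The dcdbas hunk above follows the same idiom, with one extra step worth noting: after pinning, the code verifies it really is running on CPU 0, since the SMI must be issued from there. A sketch, with hypothetical names and the same era's API:

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical wrapper around an SMI that must be issued from CPU 0. */
static int smi_on_cpu0(void (*issue_smi)(void))
{
	cpumask_t old_mask = current->cpus_allowed;
	int ret = 0;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
	if (smp_processor_id() != 0) {
		/* pinning did not take effect (e.g. CPU 0 unavailable) */
		ret = -EBUSY;
		goto out;
	}
	issue_smi();
out:
	set_cpus_allowed_ptr(current, &old_mask);
	return ret;
}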
struct mempolicy *oldpol;
cpumask_t oldmask = current->cpus_allowed;
int node = pcibus_to_node(dev->bus);
- if (node >= 0 && node_online(node))
- set_cpus_allowed(current, node_to_cpumask(node));
+
+ if (node >= 0) {
+ node_to_cpumask_ptr(nodecpumask, node);
+ set_cpus_allowed_ptr(current, nodecpumask);
+ }
/* And set default memory allocation policy */
oldpol = current->mempolicy;
current->mempolicy = NULL; /* fall back to system default policy */
#endif
error = drv->probe(dev, id);
#ifdef CONFIG_NUMA
- set_cpus_allowed(current, oldmask);
+ set_cpus_allowed_ptr(current, &oldmask);
current->mempolicy = oldpol;
#endif
return error;
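A sketch of the node-affine probe pattern above; node_to_cpumask_ptr() (introduced in the same patch series) declares a const cpumask_t pointer to the node's CPU mask (or a local copy, depending on the architecture), again avoiding a by-value mask. The wrapper name and callback are hypothetical:

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical: run a probe routine on CPUs local to the device's node. */
static int probe_on_node(struct pci_dev *dev,
			 int (*do_probe)(struct pci_dev *))
{
	cpumask_t oldmask = current->cpus_allowed;
	int node = pcibus_to_node(dev->bus);
	int error;

	if (node >= 0) {
		node_to_cpumask_ptr(nodecpumask, node);	/* declares the ptr */
		set_cpus_allowed_ptr(current, nodecpumask);
	}
	error = do_probe(dev);
	set_cpus_allowed_ptr(current, &oldmask);	/* restore affinity */
	return error;
}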
/* Ensure that we are not runnable on dying cpu */
old_allowed = current->cpus_allowed;
- tmp = CPU_MASK_ALL;
+ cpus_setall(tmp);
cpu_clear(cpu, tmp);
- set_cpus_allowed(current, tmp);
+ set_cpus_allowed_ptr(current, &tmp);
p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
out_thread:
err = kthread_stop(p);
out_allowed:
- set_cpus_allowed(current, old_allowed);
+ set_cpus_allowed_ptr(current, &old_allowed);
out_release:
cpu_hotplug_done();
return err;
}
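The hotplug hunk builds its mask with the open-coded cpumask operations of this era instead of a CPU_MASK_ALL initializer; a sketch of just the "move off the dying CPU" step, with a hypothetical helper name:

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical: keep the calling task off 'cpu' while it is being torn
 * down; *old_allowed is saved so the caller can restore affinity later. */
static int keep_off_cpu(unsigned int cpu, cpumask_t *old_allowed)
{
	cpumask_t tmp;

	*old_allowed = current->cpus_allowed;
	cpus_setall(tmp);		/* start from "all CPUs"... */
	cpu_clear(cpu, tmp);		/* ...minus the dying one */
	return set_cpus_allowed_ptr(current, &tmp);
}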
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed(current, CPU_MASK_ALL);
+ set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask = CPU_MASK_ALL;
+ cpumask_t tmp_mask;
int i;
+ cpus_setall(tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
if (rcu_idle_cpu != -1)
cpu_clear(rcu_idle_cpu, tmp_mask);
- set_cpus_allowed(current, tmp_mask);
+ set_cpus_allowed_ptr(current, &tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
- set_cpus_allowed(reader_tasks[i], tmp_mask);
+ set_cpus_allowed_ptr(reader_tasks[i],
+ &tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
- set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+ set_cpus_allowed_ptr(fakewriter_tasks[i],
+ &tmp_mask);
}
if (writer_task)
- set_cpus_allowed(writer_task, tmp_mask);
+ set_cpus_allowed_ptr(writer_task, &tmp_mask);
if (stats_task)
- set_cpus_allowed(stats_task, tmp_mask);
+ set_cpus_allowed_ptr(stats_task, &tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
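A condensed sketch of the rcutorture shuffle above: every torture thread is herded onto all CPUs except a rotating idle one; the task array and count are stand-ins for the reader/writer/stats task lists, and the caller is assumed to hold get_online_cpus():

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Hypothetical, condensed form of rcu_torture_shuffle_tasks(). */
static void shuffle_tasks(struct task_struct **tasks, int n, int idle_cpu)
{
	cpumask_t tmp_mask;
	int i;

	cpus_setall(tmp_mask);			/* allow every CPU... */
	if (idle_cpu != -1)
		cpu_clear(idle_cpu, tmp_mask);	/* ...except the idle one */

	set_cpus_allowed_ptr(current, &tmp_mask);
	for (i = 0; i < n; i++)
		if (tasks[i])
			set_cpus_allowed_ptr(tasks[i], &tmp_mask);
}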
int irqs_disabled = 0;
int prepared = 0;
- set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
/* Ack: we are alive */
smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */