Replace for_each_cpu() with for_each_possible_cpu(). This is a pure rename: for_each_cpu() already iterates over cpu_possible_map, and the new name says so explicitly instead of inviting confusion with iteration over online CPUs only.
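The distinction matters because per-CPU storage exists, and can hold live data, for every possible CPU, including ones currently offline. A minimal sketch of the pattern being fixed up below (demo_count and demo_sum are made-up names, not from this patch):

        #include <linux/percpu.h>
        #include <linux/cpumask.h>

        static DEFINE_PER_CPU(long, demo_count);

        static long demo_sum(void)
        {
                long sum = 0;
                int cpu;

                /* for_each_online_cpu() would silently drop whatever an
                 * offlined CPU had accumulated in its slot */
                for_each_possible_cpu(cpu)
                        sum += per_cpu(demo_count, cpu);
                return sum;
        }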
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
         iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
 void __init files_defer_init(void)
 {
         int i;
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 fdtable_defer_list_init(i);
 }
         if (wall_to_monotonic.tv_nsec)
                 --jif;
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 int j;
                 user = cputime64_add(user, kstat_cpu(i).cpustat.user);
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
         unsigned int __i; \
-        for_each_cpu(__i) \
+        for_each_possible_cpu(__i) \
                 memcpy((pcpudst)+__per_cpu_offset[__i], \
                        (src), (size)); \
 } while (0)
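Expanded for a single CPU, the copy in percpu_modcopy() above is just

        memcpy((pcpudst) + __per_cpu_offset[cpu], (src), (size));

so every possible CPU's replica of a module's per-CPU section receives the same initial image. Looping over online CPUs only would leave a CPU hotplugged after module load with uninitialised per-CPU data.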
 ({ \
         typeof(gendiskp->dkstats->field) res = 0; \
         int i; \
-        for_each_cpu(i) \
+        for_each_possible_cpu(i) \
                 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
         res; \
 })
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
         int i;
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 memset(per_cpu_ptr(gendiskp->dkstats, i), value,
                                 sizeof (struct disk_stats));
 }
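Both helpers operate on the per-CPU struct disk_stats replicas behind gendiskp->dkstats (the ({ ... }) block above is the SMP body of disk_stat_read() in this era's genhd.h). A hedged usage sketch, assuming a struct gendisk *disk and this era's sectors[2] field indexed by READ/WRITE:

        /* zero every possible CPU's statistics for the disk ... */
        disk_stat_set_all(disk, 0);

        /* ... and later read one field back, summed across possible CPUs */
        printk(KERN_DEBUG "sectors read: %lu\n",
               (unsigned long)disk_stat_read(disk, sectors[READ]));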
 {
         int cpu, sum = 0;
-        for_each_cpu(cpu)
+        for_each_possible_cpu(cpu)
                 sum += kstat_cpu(cpu).irqs[irq];
         return sum;
 #endif
         ptr = alloc_bootmem(size * nr_possible_cpus);
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 __per_cpu_offset[i] = ptr - __per_cpu_start;
                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
                 ptr += size;
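setup_per_cpu_areas() lays the per-CPU replicas out back to back and records, per CPU, the distance from the linker-provided template (__per_cpu_start). Per-CPU access then reduces to pointer arithmetic; roughly, from this era's asm-generic/percpu.h:

        #define per_cpu(var, cpu) \
                (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))

i.e. CPU i's copy of var lives at the template address plus __per_cpu_offset[i], exactly the offset the loop above computes.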
         long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
         long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
-        for_each_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                         pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                         batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
         atomic_set(&n_rcu_torture_error, 0);
         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                 atomic_set(&rcu_torture_wcount[i], 0);
-        for_each_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                         per_cpu(rcu_torture_count, cpu)[i] = 0;
                         per_cpu(rcu_torture_batch, cpu)[i] = 0;
 {
         unsigned long i, sum = 0;
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += cpu_rq(i)->nr_uninterruptible;
         /*
 {
         unsigned long long i, sum = 0;
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += cpu_rq(i)->nr_switches;
         return sum;
 {
         unsigned long i, sum = 0;
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 sum += atomic_read(&cpu_rq(i)->nr_iowait);
         return sum;
         runqueue_t *rq;
         int i, j, k;
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 prio_array_t *array;
                 rq = cpu_rq(i);
          * and we have no way of figuring out how to fix the array
          * that we have allocated then....
          */
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 int node = cpu_to_node(i);
                 if (node_online(node))
         /*
          * We allocate for all cpus so we cannot use for online cpu here.
          */
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 kfree(p->ptrs[i]);
         kfree(p);
 }
         spin_lock(&fbc->lock);
         ret = fbc->count;
-        for_each_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 long *pcount = per_cpu_ptr(fbc->counters, cpu);
                 ret += *pcount;
         }
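The sum above folds every possible CPU's residue into the central fbc->count under the lock, where percpu_counter_read() returns only the (possibly stale) central value. A hedged usage sketch against this era's API (percpu_counter_mod() rather than the later percpu_counter_add(); the events counter and demo() are made-up names):

        #include <linux/percpu_counter.h>

        static struct percpu_counter events;

        static long demo(void)
        {
                /* this era's init takes no arguments beyond the counter */
                percpu_counter_init(&events);

                /* cheap update: batched per CPU, folded into fbc->count
                 * only once a CPU's local delta grows large enough */
                percpu_counter_mod(&events, 1);

                /* exact total, including residue left by offline CPUs */
                return percpu_counter_sum(&events);
        }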