#endif
#ifdef CONFIG_SMP
-int highest_possible_processor_id(void);
+extern int nr_cpu_ids;
#define any_online_cpu(mask) __any_online_cpu(&(mask))
int __any_online_cpu(const cpumask_t *mask);
#else
-#define highest_possible_processor_id() 0
+#define nr_cpu_ids 1
#define any_online_cpu(mask) 0
#endif
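/* Header change: under CONFIG_SMP the highest_possible_processor_id()
 * declaration gives way to the integer nr_cpu_ids, i.e. the highest possible
 * CPU id plus one; the UP build simply pins it to the constant 1. */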
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
- unsigned int i;
+ unsigned int cpu;
+ unsigned highest = 0;
+
+ for_each_cpu_mask(cpu, cpu_possible_map)
+ highest = cpu;
+ nr_cpu_ids = highest + 1;
/* FIXME: This should be done in userspace --RR */
- for_each_present_cpu(i) {
+ for_each_present_cpu(cpu) {
if (num_online_cpus() >= max_cpus)
break;
- if (!cpu_online(i))
- cpu_up(i);
+ if (!cpu_online(cpu))
+ cpu_up(cpu);
}
/* Any cleanup work */
}
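/* smp_init() now derives nr_cpu_ids once during boot by walking
 * cpu_possible_map for the highest possible CPU id, so later users read a
 * cached value instead of rescanning the mask on every call. */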
EXPORT_SYMBOL(__next_cpu);
-/*
- * Find the highest possible smp_processor_id()
- *
- * Note: if we're prepared to assume that cpu_possible_map never changes
- * (reasonable) then this function should cache its return value.
- */
-int highest_possible_processor_id(void)
-{
- unsigned int cpu;
- unsigned highest = 0;
-
- for_each_cpu_mask(cpu, cpu_possible_map)
- highest = cpu;
- return highest;
-}
-EXPORT_SYMBOL(highest_possible_processor_id);
+int nr_cpu_ids;
+EXPORT_SYMBOL(nr_cpu_ids);
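/* The removed helper walked cpu_possible_map on every call, and its own
 * comment already suggested caching the result; nr_cpu_ids is that cached
 * value, set up in smp_init() above and exported in place of the old
 * function.
 *
 * Illustration only, not part of the patch: a call site that used to size a
 * per-CPU array with highest_possible_processor_id() + 1 converts exactly as
 * the ebtables and sunrpc hunks below do ("example" and its "percpu" member
 * are made-up names):
 *
 *	example->percpu = vmalloc(nr_cpu_ids * sizeof(*example->percpu));
 *	if (!example->percpu)
 *		return -ENOMEM;
 */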
int __any_online_cpu(const cpumask_t *mask)
{
/* this will get free'd in do_replace()/ebt_register_table()
if an error occurs */
newinfo->chainstack =
- vmalloc((highest_possible_processor_id()+1)
- * sizeof(*(newinfo->chainstack)));
+ vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
if (!newinfo->chainstack)
return -ENOMEM;
for_each_possible_cpu(i) {
if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
return -ENOMEM;
- countersize = COUNTER_OFFSET(tmp.nentries) *
- (highest_possible_processor_id()+1);
+ countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
if (!newinfo)
return -ENOMEM;
return -EINVAL;
}
- countersize = COUNTER_OFFSET(repl->nentries) *
- (highest_possible_processor_id()+1);
+ countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
ret = -ENOMEM;
if (!newinfo)
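/* In ebtables, the chainstack allocation and both counter-area size
 * calculations above now simply multiply by nr_cpu_ids rather than calling
 * highest_possible_processor_id() + 1 each time. The sunrpc pool map below
 * makes the same substitution when sizing its per-CPU pools (maxpools). */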
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
- unsigned int maxpools = highest_possible_processor_id() + 1;
+ unsigned int maxpools = nr_cpu_ids;
unsigned int pidx = 0;
unsigned int cpu;
int err;