#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#define IPI_SCHEDULE 1
#define IPI_CALL 2
/* CPU masks */
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(phys_cpu_present_map);
/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/processor.h>
extern void per_cpu_trap_init(void);
cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
+
cpumask_t cpu_online_map;
static atomic_t cpus_booted = ATOMIC_INIT(0);
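The new exports are needed because the netfilter conversions below iterate the possible map from modular code (ebtables and the *_tables can be built as modules), and for_each_cpu() references cpu_possible_map directly. By analogy with the iterators in the next hunk, it is presumably defined as:

	/* assumed shape, matching for_each_online_cpu() below */
	#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)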
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+/* Find the highest possible smp_processor_id() */
+static inline unsigned int highest_possible_processor_id(void)
+{
+ unsigned int cpu, highest = 0;
+
+ for_each_cpu_mask(cpu, cpu_possible_map)
+ highest = cpu;
+
+ return highest;
+}
+
#endif /* __LINUX_CPUMASK_H */
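The helper exists because cpu_possible_map may be sparse: num_possible_cpus() counts set bits, which is not a safe bound when arrays are indexed by raw CPU id. A minimal sizing sketch, not the patched code, using only names from the hunks above:

	/*
	 * Hypothetical sparse map { 0, 2 }: num_possible_cpus() == 2,
	 * but valid indices run 0..2, so per-CPU arrays need
	 * highest_possible_processor_id() + 1 == 3 slots.
	 */
	static void *alloc_id_indexed(size_t slot_size)
	{
		return vmalloc((highest_possible_processor_id() + 1) * slot_size);
	}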
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
+#include <linux/cpumask.h>
#include <net/sock.h>
/* needed for logical [in,out]-dev filtering */
#include "../br_private.h"
/* this will get free'd in do_replace()/ebt_register_table()
if an error occurs */
newinfo->chainstack = (struct ebt_chainstack **)
- vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack));
+ vmalloc((highest_possible_processor_id()+1)
+ * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack)
return -ENOMEM;
- for (i = 0; i < num_possible_cpus(); i++) {
+ for_each_cpu(i) {
newinfo->chainstack[i] =
vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack[i]) {
/* counters of cpu 0 */
memcpy(counters, oldcounters,
- sizeof(struct ebt_counter) * nentries);
+ sizeof(struct ebt_counter) * nentries);
+
/* add other counters to those of cpu 0 */
- for (cpu = 1; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
+ if (cpu == 0)
+ continue;
counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
for (i = 0; i < nentries; i++) {
counters[i].pcnt += counter_base[i].pcnt;
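Because for_each_cpu() starts from the lowest set bit, loops that formerly began at cpu 1 now need an explicit skip: cpu 0's block was already copied into the result by the memcpy() above. The converted accumulation pattern, in sketch form:

	for_each_cpu(cpu) {
		if (cpu == 0)
			continue;	/* cpu 0 seeded the result */
		/* fold this CPU's counter block into counters[] */
	}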
BUGPRINT("Entries_size never zero\n");
return -EINVAL;
}
- countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus();
+ countersize = COUNTER_OFFSET(tmp.nentries) *
+ (highest_possible_processor_id()+1);
newinfo = (struct ebt_table_info *)
vmalloc(sizeof(struct ebt_table_info) + countersize);
if (!newinfo)
vfree(table->entries);
if (table->chainstack) {
- for (i = 0; i < num_possible_cpus(); i++)
+ for_each_cpu(i)
vfree(table->chainstack[i]);
vfree(table->chainstack);
}
vfree(counterstmp);
/* can be initialized in translate_table() */
if (newinfo->chainstack) {
- for (i = 0; i < num_possible_cpus(); i++)
+ for_each_cpu(i)
vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack);
}
return -EINVAL;
}
- countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus();
+ countersize = COUNTER_OFFSET(table->table->nentries) *
+ (highest_possible_processor_id()+1);
newinfo = (struct ebt_table_info *)
vmalloc(sizeof(struct ebt_table_info) + countersize);
ret = -ENOMEM;
up(&ebt_mutex);
free_chainstack:
if (newinfo->chainstack) {
- for (i = 0; i < num_possible_cpus(); i++)
+ for_each_cpu(i)
vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack);
}
up(&ebt_mutex);
vfree(table->private->entries);
if (table->private->chainstack) {
- for (i = 0; i < num_possible_cpus(); i++)
+ for_each_cpu(i)
vfree(table->private->chainstack[i]);
vfree(table->private->chainstack);
}
}
/* And one copy for every other CPU */
- for (i = 1; i < num_possible_cpus(); i++) {
- memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ for_each_cpu(i) {
+ if (i == 0)
+ continue;
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
}
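Each possible CPU owns a private copy of the rule blob at offset SMP_ALIGN(size) * cpu, which is why the allocations in these files are sized by highest_possible_processor_id() + 1: with a hypothetical sparse map { 0, 2 }, the old size of 2 * SMP_ALIGN(size) would make the copy to slot 2 in the loop above overrun the allocation, while 3 slots leave the hole at slot 1 merely unused. A sketch of the addressing, assuming TABLE_OFFSET() in the lookup hunks expands the same way:

	/* sketch only: locate one CPU's private copy inside the blob */
	static inline void *cpu_copy(void *entries, unsigned int size,
				     unsigned int cpu)
	{
		return entries + SMP_ALIGN(size) * cpu;
	}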
unsigned int cpu;
unsigned int i;
- for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
i = 0;
ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
return -ENOMEM;
newinfo = vmalloc(sizeof(struct arpt_table_info)
- + SMP_ALIGN(tmp.size) * num_possible_cpus());
+ + SMP_ALIGN(tmp.size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct arpt_table_info)
- + SMP_ALIGN(repl->size) * num_possible_cpus());
+ + SMP_ALIGN(repl->size) *
+ (highest_possible_processor_id()+1));
if (!newinfo) {
ret = -ENOMEM;
return ret;
#include <asm/semaphore.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
+#include <linux/cpumask.h>
#include <linux/netfilter_ipv4/ip_tables.h>
}
/* And one copy for every other CPU */
- for (i = 1; i < num_possible_cpus(); i++) {
- memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ for_each_cpu(i) {
+ if (i == 0)
+ continue;
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
}
struct ipt_entry *table_base;
unsigned int i;
- for (i = 0; i < num_possible_cpus(); i++) {
+ for_each_cpu(i) {
table_base =
(void *)newinfo->entries
+ TABLE_OFFSET(newinfo, i);
unsigned int cpu;
unsigned int i;
- for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
i = 0;
IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
return -ENOMEM;
newinfo = vmalloc(sizeof(struct ipt_table_info)
- + SMP_ALIGN(tmp.size) * num_possible_cpus());
+ + SMP_ALIGN(tmp.size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct ipt_table_info)
- + SMP_ALIGN(repl->size) * num_possible_cpus());
+ + SMP_ALIGN(repl->size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/proc_fs.h>
+#include <linux/cpumask.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
}
/* And one copy for every other CPU */
- for (i = 1; i < num_possible_cpus(); i++) {
- memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ for_each_cpu(i) {
+ if (i == 0)
+ continue;
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
}
unsigned int i;
- for (i = 0; i < num_possible_cpus(); i++) {
+ for_each_cpu(i) {
table_base =
(void *)newinfo->entries
+ TABLE_OFFSET(newinfo, i);
unsigned int cpu;
unsigned int i;
- for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
i = 0;
IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
return -ENOMEM;
newinfo = vmalloc(sizeof(struct ip6t_table_info)
- + SMP_ALIGN(tmp.size) * num_possible_cpus());
+ + SMP_ALIGN(tmp.size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct ip6t_table_info)
- + SMP_ALIGN(repl->size) * num_possible_cpus());
+ + SMP_ALIGN(repl->size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;