netfilter: xtables: stackptr should be percpu
author    Eric Dumazet <eric.dumazet@gmail.com>
          Mon, 31 May 2010 14:41:35 +0000 (16:41 +0200)
committer Patrick McHardy <kaber@trash.net>
          Mon, 31 May 2010 14:41:35 +0000 (16:41 +0200)
commit f3c5c1bfd4 (netfilter: xtables: make ip_tables reentrant)
introduced a performance regression, because the stackptr array is shared
by all CPUs, adding cache-line ping-pongs (16 CPUs share a single 64-byte
cache line).

Fix this by using alloc_percpu().
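To illustrate the difference (the demo_ names below are hypothetical, not
part of this patch): with a plain array, all per-CPU slots sit back to
back in memory, so sixteen 4-byte counters share one 64-byte cache line;
alloc_percpu() instead places each CPU's copy in its own per-CPU area.

    #include <linux/percpu.h>

    /* Before: one contiguous array indexed by CPU number. Sixteen
     * unsigned ints fit in a single 64-byte cache line, so a write
     * from any CPU invalidates that line in every other CPU's cache. */
    static unsigned int *demo_shared;            /* demo_shared[cpu] */

    /* After: each CPU's counter lives in its own per-CPU area,
     * so updates never touch another CPU's cache lines. */
    static unsigned int __percpu *demo_pcpu;

    static int demo_init(void)
    {
            demo_pcpu = alloc_percpu(unsigned int); /* zeroed on return */
            if (!demo_pcpu)
                    return -ENOMEM;
            return 0;
    }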

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
include/linux/netfilter/x_tables.h
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c
net/netfilter/x_tables.c

diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c00cc0c4d0b7c29614bbafdfabe73828a04a0fbd..24e5d01d27d07b860bfed06701fb2d864ee89bc3 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -397,7 +397,7 @@ struct xt_table_info {
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
-       unsigned int *stackptr;
+       unsigned int __percpu *stackptr;
        void ***jumpstack;
        /* ipt_entry tables: one per CPU */
        /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
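
The __percpu annotation marks the pointer for sparse: it is a per-CPU
offset cookie, not a normal address, and must only be dereferenced through
the per-CPU accessors. A minimal sketch of what the annotation implies
(variable names illustrative):

    unsigned int __percpu *sp = alloc_percpu(unsigned int);
    unsigned int *p;

    p = per_cpu_ptr(sp, cpu);   /* pointer to a given CPU's instance */
    p = this_cpu_ptr(sp);       /* current CPU's instance; preemption
                                 * must already be disabled           */
    /* *sp = 0;  <-- direct dereference: sparse flags this as an error */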
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5d0fbe67e675ee72a10b9d27446cd..4b6c5ca610fc0a463db4995e8b58393916e0a35c 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
        cpu        = smp_processor_id();
        table_base = private->entries[cpu];
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-       stackptr   = &private->stackptr[cpu];
+       stackptr   = per_cpu_ptr(private->stackptr, cpu);
        origptr    = *stackptr;
 
        e = get_entry(table_base, private->hook_entry[hook]);
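
Since ipt_do_table() already computes smp_processor_id() for the entries
and jumpstack lookups, the patch uses per_cpu_ptr() with that explicit
CPU; on a path like this one, which runs with preemption disabled, that
is equivalent to this_cpu_ptr(). A sketch of the two forms:

    cpu      = smp_processor_id();
    stackptr = per_cpu_ptr(private->stackptr, cpu);  /* explicit CPU */
    /* equivalent here, as the path runs with preemption disabled:   */
    stackptr = this_cpu_ptr(private->stackptr);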
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd8369254ae73d0ac9888792326dd610352..9d2d68f0e6053d97fdfc3f350135604309a35808 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
        cpu        = smp_processor_id();
        table_base = private->entries[cpu];
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-       stackptr   = &private->stackptr[cpu];
+       stackptr   = per_cpu_ptr(private->stackptr, cpu);
        origptr    = *stackptr;
 
        e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 47b1e7917a9c86423e4df95829f1d7865ae23b99..e34622fa000357c5e24eeddd2aad835d616fd8ce 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
                vfree(info->jumpstack);
        else
                kfree(info->jumpstack);
-       if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-               vfree(info->stackptr);
-       else
-               kfree(info->stackptr);
+
+       free_percpu(info->stackptr);
 
        kfree(info);
 }
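
On the teardown side, free_percpu() replaces the vfree()/kfree() choice
that mirrored the old size check; like kfree(), free_percpu() accepts
NULL, so no guard is needed. And because alloc_percpu() returns zeroed
memory, the explicit memset() in xt_jumpstack_alloc() (next hunk) can be
dropped as well. A sketch of the simplified lifecycle (error handling
elided):

    i->stackptr = alloc_percpu(unsigned int);  /* returns zeroed memory   */
    ...
    free_percpu(i->stackptr);                  /* NULL-safe, like kfree() */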
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
        unsigned int size;
        int cpu;
 
-       size = sizeof(unsigned int) * nr_cpu_ids;
-       if (size > PAGE_SIZE)
-               i->stackptr = vmalloc(size);
-       else
-               i->stackptr = kmalloc(size, GFP_KERNEL);
+       i->stackptr = alloc_percpu(unsigned int);
        if (i->stackptr == NULL)
                return -ENOMEM;
-       memset(i->stackptr, 0, size);
 
        size = sizeof(void **) * nr_cpu_ids;
        if (size > PAGE_SIZE)