mm: also use alloc_large_system_hash() for the PID hash table
author Jan Beulich <JBeulich@novell.com>
Tue, 22 Sep 2009 00:03:07 +0000 (17:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Sep 2009 14:17:38 +0000 (07:17 -0700)
This is done by allowing boot-time allocations to specify that they may
want a sub-page-sized amount of memory.

Overall this seems more consistent with the other hash table allocations,
and it allows making two supposedly mm-only variables (nr_{kernel,all}_pages)
truly mm-only by turning them static.
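
For reference, the new caller-side form (taken from the kernel/pid.c hunk
below) looks like this; with HASH_SMALL set, *_hash_shift carries the
caller's minimum table shift on entry and receives the shift actually
chosen on return:

	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
					   HASH_EARLY | HASH_SMALL,
					   &pidhash_shift, NULL, 4096);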

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/bootmem.h
kernel/pid.c
mm/page_alloc.c

include/linux/bootmem.h
index bc3ab70736955675afef688d4209c5bf8392b4b3..dd97fb8408a87afe15c4c671e908a255562286d4 100644
@@ -132,9 +132,6 @@ static inline void *alloc_remap(int nid, unsigned long size)
 }
 #endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
 
-extern unsigned long __meminitdata nr_kernel_pages;
-extern unsigned long __meminitdata nr_all_pages;
-
 extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
@@ -145,6 +142,8 @@ extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long limit);
 
 #define HASH_EARLY     0x00000001      /* Allocating during early boot? */
+#define HASH_SMALL     0x00000002      /* sub-page allocation allowed, min
+                                        * shift passed via *_hash_shift */
 
 /* Only NUMA needs hash distribution. 64bit NUMA architectures have
  * sufficient vmalloc space.

kernel/pid.c
index 31310b5d3f50325ed75a6e54b94fc715d63698fa..d3f722d20f9c6e8d849693a1754b616d3ba74bea 100644
@@ -40,7 +40,7 @@
 #define pid_hashfn(nr, ns)     \
        hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
 static struct hlist_head *pid_hash;
-static int pidhash_shift;
+static unsigned int pidhash_shift = 4;
 struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
@@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 void __init pidhash_init(void)
 {
        int i, pidhash_size;
-       unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
 
-       pidhash_shift = max(4, fls(megabytes * 4));
-       pidhash_shift = min(12, pidhash_shift);
+       pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
+                                          HASH_EARLY | HASH_SMALL,
+                                          &pidhash_shift, NULL, 4096);
        pidhash_size = 1 << pidhash_shift;
 
-       printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
-               pidhash_size, pidhash_shift,
-               pidhash_size * sizeof(struct hlist_head));
-
-       pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
-       if (!pid_hash)
-               panic("Could not alloc pidhash!\n");
        for (i = 0; i < pidhash_size; i++)
                INIT_HLIST_HEAD(&pid_hash[i]);
 }
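
The arguments keep the bounds of the removed open-coded sizing: the initial
pidhash_shift of 4 is the HASH_SMALL minimum (16 buckets), the limit of 4096
entries matches the old maximum shift of 12, and scale == 18 gives roughly one
bucket per 256 KB of low memory, much like the old fls(megabytes * 4)
heuristic.  The removed printk() and panic() are covered inside
alloc_large_system_hash(), which itself logs the chosen table size and panics
on allocation failure.  A rough sketch of the resulting sizing (illustrative
only; low_memory_bytes is a made-up stand-in for the low-memory size the
allocator actually sizes from):

	/* Illustrative sketch, not part of the patch. */
	unsigned long entries = low_memory_bytes >> 18;	/* scale == 18: ~1 bucket per 256 KB */
	if (!(entries >> 4))				/* HASH_SMALL: honour pidhash_shift == 4 */
		entries = 1UL << 4;
	entries = roundup_pow_of_two(entries);
	if (entries > 4096)				/* limit argument: at most 2^12 buckets */
		entries = 4096;
	pidhash_shift = ilog2(entries);
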
mm/page_alloc.c
index 33b1a4762a7bb07a22b65ccda65ef4bd738dccac..770f011e1c12486a2d680e63aeb32324da671f4f 100644
@@ -124,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -4821,7 +4821,14 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries <<= (PAGE_SHIFT - scale);
 
                /* Make sure we've got at least a 0-order allocation.. */
-               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+               if (unlikely(flags & HASH_SMALL)) {
+                       /* Makes no sense without HASH_EARLY */
+                       WARN_ON(!(flags & HASH_EARLY));
+                       if (!(numentries >> *_hash_shift)) {
+                               numentries = 1UL << *_hash_shift;
+                               BUG_ON(!numentries);
+                       }
+               } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
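
The WARN_ON documents that HASH_SMALL without HASH_EARLY is pointless: only
the early bootmem path can hand out less than a page, while the later
vmalloc/page-allocator paths are page-granular anyway.  Restated as a
standalone helper (an illustrative sketch with made-up names, not kernel
code):

	/* Minimum-size rule from the hunk above: without HASH_SMALL the table
	 * is padded up to at least one page worth of buckets; with HASH_SMALL
	 * it is only padded up to the caller-supplied minimum shift. */
	static unsigned long hash_min_entries(unsigned long numentries,
					      unsigned long bucketsize,
					      unsigned int min_shift,
					      int small)
	{
		if (small) {
			if (!(numentries >> min_shift))
				numentries = 1UL << min_shift;
		} else if (numentries * bucketsize < PAGE_SIZE) {
			numentries = PAGE_SIZE / bucketsize;
		}

		return roundup_pow_of_two(numentries);
	}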