Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 30 Jul 2012 18:32:24 +0000 (11:32 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 30 Jul 2012 18:32:24 +0000 (11:32 -0700)
Pull SLAB changes from Pekka Enberg:
 "Most of the changes included are from Christoph Lameter's "common
  slab" patch series that unifies common parts of SLUB, SLAB, and SLOB
  allocators.  The unification is needed for Glauber Costa's "kmem
  memcg" work that will hopefully appear for v3.7.

  The rest of the changes are fixes and speedups by various people."

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (32 commits)
  mm: Fix build warning in kmem_cache_create()
  slob: Fix early boot kernel crash
  mm, slub: ensure irqs are enabled for kmemcheck
  mm, sl[aou]b: Move kmem_cache_create mutex handling to common code
  mm, sl[aou]b: Use a common mutex definition
  mm, sl[aou]b: Common definition for boot state of the slab allocators
  mm, sl[aou]b: Extract common code for kmem_cache_create()
  slub: remove invalid reference to list iterator variable
  mm: Fix signal SIGFPE in slabinfo.c.
  slab: move FULL state transition to an initcall
  slab: Fix a typo in commit 8c138b "slab: Get rid of obj_size macro"
  mm, slab: Build fix for recent kmem_cache changes
  slab: rename gfpflags to allocflags
  slub: refactoring unfreeze_partials()
  slub: use __cmpxchg_double_slab() at interrupt disabled place
  slab/mempolicy: always use local policy from interrupt context
  slab: Get rid of obj_size macro
  mm, sl[aou]b: Extract common fields from struct kmem_cache
  slab: Remove some accessors
  slab: Use page struct fields instead of casting
  ...

include/linux/mm_types.h
mm/Makefile
mm/mempolicy.c

diff --combined include/linux/mm_types.h
index 704a626d94a08adc03b32b756d2d0a1c9a1f40cf,680a5e4e8cd50b2db17cc8b7c42f189e4747abcb..074eb98fe15dec4d7a8f01336875a91349d27a3e
@@@ -53,22 -53,12 +53,22 @@@ struct page 
        struct {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
-                       void *freelist;         /* slub first free object */
+                       void *freelist;         /* slub/slob first free object */
                };
  
                union {
 +#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 +      defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
 +#else
 +                      /*
 +                       * Keep _count separate from slub cmpxchg_double data.
 +                       * As the rest of the double word is protected by
 +                       * slab_lock but _count is not.
 +                       */
 +                      unsigned counters;
 +#endif
  
                        struct {

                                union {
                                        /*
                                         * Count of ptes mapped in
                                         * mms, to show when page is
                                         * mapped & limit reverse map
                                         * searches.
                                         */
                                        atomic_t _mapcount;
  
-                                       struct {
+                                       struct { /* SLUB */
                                                unsigned inuse:16;
                                                unsigned objects:15;
                                                unsigned frozen:1;
                                        };
+                                       int units;      /* SLOB */
                                };
                                atomic_t _count;                /* Usage count, see below. */
                        };
                        short int pobjects;
  #endif
                };
+               struct list_head list;  /* slobs list of pages */
+               struct {                /* slab fields */
+                       struct kmem_cache *slab_cache;
+                       struct slab *slab_page;
+               };
        };
  
        /* Remainder is not double word aligned */
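
The mm_types.h hunks above widen the second word of struct page so that all
three allocators can keep their per-page state there: the freelist pointer is
now shared by SLUB and SLOB, the SLUB inuse/objects/frozen bitfields gain a
sibling 'units' counter for SLOB, and 'counters' stays an unsigned long only
when cmpxchg_double can operate on the freelist/counters pair.  The following
stand-alone sketch (struct fake_page and its fields are a simplified stand-in,
not the kernel definition) shows how the unions let those views alias the same
storage:

    #include <stdio.h>

    struct fake_page {                      /* simplified stand-in for struct page */
            union {
                    unsigned long index;    /* page cache: offset within mapping */
                    void *freelist;         /* slub/slob: first free object */
            };
            union {
                    unsigned long counters; /* slub: word updated via cmpxchg_double */
                    struct {                /* SLUB per-slab bookkeeping */
                            unsigned inuse:16;
                            unsigned objects:15;
                            unsigned frozen:1;
                    };
                    int units;              /* SLOB: free units left in the page */
            };
    };

    int main(void)
    {
            struct fake_page p = {0};

            /* The SLUB bitfields and the SLOB 'units' field alias the low
             * bits of 'counters'; writing one view is visible in the other. */
            p.inuse = 3;
            p.objects = 16;
            p.frozen = 1;
            printf("counters word: %#lx\n", p.counters);
            printf("overlay size : %zu bytes\n", sizeof(struct fake_page));
            return 0;
    }

Bitfield layout is implementation-defined, so the exact value printed varies by
ABI; what matters is that the fields share one machine word, which is what lets
SLUB update freelist and counters together in a single cmpxchg_double.
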
diff --combined mm/Makefile
index 2e2fbbefb99fa94c97be13aa8fa71da823455409,ae370783612d77125f95a11ef20d9bc96f5e2584..8e81fe263c94f63dcf3700fd63828a58d0c963af
@@@ -16,7 -16,8 +16,8 @@@ obj-y                 := filemap.o mempool.o oom_kill
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
                           page_isolation.o mm_init.o mmu_context.o percpu.o \
-                          compaction.o $(mmu-y)
+                          compaction.o slab_common.o $(mmu-y)
  obj-y += init-mm.o
  
  ifdef CONFIG_NO_BOOTMEM
@@@ -29,7 -30,6 +30,7 @@@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock
  
  obj-$(CONFIG_BOUNCE)  += bounce.o
  obj-$(CONFIG_SWAP)    += page_io.o swap_state.o swapfile.o
 +obj-$(CONFIG_FRONTSWAP)       += frontswap.o
  obj-$(CONFIG_HAS_DMA) += dmapool.o
  obj-$(CONFIG_HUGETLBFS)       += hugetlb.o
  obj-$(CONFIG_NUMA)    += mempolicy.o
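
The Makefile change builds the new mm/slab_common.o into every configuration.
Per the shortlog above, the argument checking and slab_mutex handling of
kmem_cache_create() move there, while SLAB, SLUB and SLOB each keep an
allocator-specific __kmem_cache_create().  Roughly, and leaving out the
CONFIG_DEBUG_VM duplicate-name checks and error reporting of the real
function, the common entry point looks like this sketch (not verbatim kernel
code):

    /* mm/slab_common.c (sketch) */
    struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                            size_t align, unsigned long flags,
                            void (*ctor)(void *))
    {
            struct kmem_cache *s;

            /* sanity checks shared by all three allocators */
            if (!name || in_interrupt() || size < sizeof(void *))
                    return NULL;

            get_online_cpus();
            mutex_lock(&slab_mutex);        /* the new common mutex */

            /* allocator-specific part: SLAB, SLUB and SLOB each define this */
            s = __kmem_cache_create(name, size, align, flags, ctor);

            mutex_unlock(&slab_mutex);
            put_online_cpus();

            if (!s && (flags & SLAB_PANIC))
                    panic("kmem_cache_create: failed for cache %s\n", name);

            return s;
    }

This is the structure the "Use a common mutex definition" and "Common
definition for boot state of the slab allocators" commits in the list above
refer to.
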
diff --combined mm/mempolicy.c
index 1d771e4200d222eea46458bdd6089279a78ffeb5,cb0b230aa3f22850ea53c8976bf3660b9cb67238..bd92431d4c49a8e29f4b46d50d6d0e66c69098d5
@@@ -1177,7 -1177,7 +1177,7 @@@ static long do_mbind(unsigned long star
                if (!list_empty(&pagelist)) {
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
 -                                              false, true);
 +                                              false, MIGRATE_SYNC);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }
@@@ -1602,8 -1602,14 +1602,14 @@@ static unsigned interleave_nodes(struc
   * task can change it's policy.  The system default policy requires no
   * such protection.
   */
- unsigned slab_node(struct mempolicy *policy)
+ unsigned slab_node(void)
  {
+       struct mempolicy *policy;
+       if (in_interrupt())
+               return numa_node_id();
+       policy = current->mempolicy;
        if (!policy || policy->flags & MPOL_F_LOCAL)
                return numa_node_id();
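
The last hunk changes the slab_node() interface: instead of taking a mempolicy
argument from the caller, it now reads current->mempolicy itself and falls back
to numa_node_id() when called from interrupt context, where the interrupted
task's policy has nothing to do with the allocation.  A rough caller-side
sketch (pick_alloc_node() is a made-up name; the real call sites are in
mm/slab.c and mm/slub.c):

    static int pick_alloc_node(void)
    {
            /* was: slab_node(current->mempolicy), which every caller had to
             * guard against interrupt context on its own */
            if (current->mempolicy)
                    return slab_node();

            return numa_node_id();
    }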