Merge commit 'linus/master' into HEAD
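As far as mm/slab.c is concerned, this merge brings in the early-boot slab
work: the allocator now starts up behind a restricted gfp mask while
interrupts are still disabled, a new EARLY state makes slab_is_available()
return true as soon as kmem_cache_init() finishes, and the final resizing of
the head arrays moves into a new kmem_cache_init_late() that runs once
interrupts are on.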
index 6a1ad0b9a94f51bece1c62b1bf2a7d4da7785205..af3376d0a8333f2f7271f3f093a52a917a8f9881 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,6 +304,12 @@ struct kmem_list3 {
        int free_touched;               /* updated without locking */
 };
 
+/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
 /*
  * Need this for bootstrapping a per node allocator.
  */
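slab_gfp_mask starts out as SLAB_GFP_BOOT_MASK and is only widened to
__GFP_BITS_MASK later, in kmem_cache_init_late() (see below).
SLAB_GFP_BOOT_MASK itself comes from the companion include/linux/gfp.h
change, which is not part of this hunk; roughly, it clears the flags that
permit sleeping and I/O, so a sketch of its definition would be:

	/* Control slab gfp mask during early boot -- a sketch of the
	 * gfp.h definition added by the companion patch; check
	 * include/linux/gfp.h for the authoritative spelling. */
	#define SLAB_GFP_BOOT_MASK \
		(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

ANDing an allocation's flags with this mask demotes a sleeping request such
as GFP_KERNEL to an atomic one for the duration of early boot.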
@@ -673,6 +679,7 @@ static enum {
        NONE,
        PARTIAL_AC,
        PARTIAL_L3,
+       EARLY,
        FULL
 } g_cpucache_up;
 
@@ -681,7 +688,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-       return g_cpucache_up == FULL;
+       return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
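Because EARLY is declared between PARTIAL_L3 and FULL, the ">=" comparison
means slab_is_available() flips to true as soon as kmem_cache_init()
completes, not only after the late head-array resize. A hypothetical
boot-time caller (names illustrative only) would see:

	void *buf;

	if (slab_is_available())	/* true from EARLY onwards */
		buf = kmalloc(size, GFP_NOWAIT);
	else
		buf = alloc_bootmem(size);	/* before kmem_cache_init() */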
@@ -1545,19 +1552,27 @@ void __init kmem_cache_init(void)
                }
        }
 
-       /* 6) resize the head arrays to their final sizes */
-       {
-               struct kmem_cache *cachep;
-               mutex_lock(&cache_chain_mutex);
-               list_for_each_entry(cachep, &cache_chain, next)
-                       if (enable_cpucache(cachep, GFP_NOWAIT))
-                               BUG();
-               mutex_unlock(&cache_chain_mutex);
-       }
+       g_cpucache_up = EARLY;
 
        /* Annotate slab for lockdep -- annotate the malloc caches */
        init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+       struct kmem_cache *cachep;
+
+       /*
+        * Interrupts are enabled now so all GFP allocations are safe.
+        */
+       slab_gfp_mask = __GFP_BITS_MASK;
 
+       /* 6) resize the head arrays to their final sizes */
+       mutex_lock(&cache_chain_mutex);
+       list_for_each_entry(cachep, &cache_chain, next)
+               if (enable_cpucache(cachep, GFP_NOWAIT))
+                       BUG();
+       mutex_unlock(&cache_chain_mutex);
 
        /* Done! */
        g_cpucache_up = FULL;
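The interleaved context lines make this hunk hard to read, so here is how
the tail of kmem_cache_init() and the new kmem_cache_init_late() should come
out after the merge (reconstructed from the hunk; the trailing "..." elides
code outside it):

	g_cpucache_up = EARLY;

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();
}

void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/*
	 * Interrupts are enabled now so all GFP allocations are safe.
	 */
	slab_gfp_mask = __GFP_BITS_MASK;

	/* 6) resize the head arrays to their final sizes */
	mutex_lock(&cache_chain_mutex);
	list_for_each_entry(cachep, &cache_chain, next)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&cache_chain_mutex);

	/* Done! */
	g_cpucache_up = FULL;
	...

kmem_cache_init_late() is expected to be called from start_kernel() once
local_irq_enable() has run, which is what justifies lifting the gfp mask at
that point.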
@@ -2034,7 +2049,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
                        for_each_online_node(node) {
                                cachep->nodelists[node] =
                                    kmalloc_node(sizeof(struct kmem_list3),
-                                               GFP_KERNEL, node);
+                                               gfp, node);
                                BUG_ON(!cachep->nodelists[node]);
                                kmem_list3_init(cachep->nodelists[node]);
                        }
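setup_cpu_cache() now honors the gfp it was handed instead of hard-coding
GFP_KERNEL, so this kmem_list3 allocation inherits boot-safe flags when it
runs early. The caller is expected to pick the gfp based on boot progress,
roughly as follows (a sketch of the kmem_cache_create() logic from this
series, not a verbatim quote):

	gfp_t gfp;

	if (slab_is_available())
		gfp = GFP_KERNEL;	/* normal runtime: sleeping is fine */
	else
		gfp = GFP_NOWAIT;	/* early boot: IRQs may still be off */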
@@ -3286,6 +3301,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        unsigned long save_flags;
        void *ptr;
 
+       flags &= slab_gfp_mask;
+
        lockdep_trace_alloc(flags);
 
        if (slab_should_failslab(cachep, flags))
@@ -3369,6 +3386,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        unsigned long save_flags;
        void *objp;
 
+       flags &= slab_gfp_mask;
+
        lockdep_trace_alloc(flags);
 
        if (slab_should_failslab(cachep, flags))
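Both hot paths (__cache_alloc_node() above and __cache_alloc() here) clamp
the caller's flags before anything else looks at them. The practical effect
during early boot, assuming the SLAB_GFP_BOOT_MASK sketch above:

	gfp_t flags = GFP_KERNEL;	/* __GFP_WAIT | __GFP_IO | __GFP_FS */

	flags &= slab_gfp_mask;		/* == SLAB_GFP_BOOT_MASK until late init */
	/* __GFP_WAIT is now clear, so the allocator will neither sleep nor
	 * re-enable interrupts on this caller's behalf. */

Once kmem_cache_init_late() sets slab_gfp_mask to __GFP_BITS_MASK, the
masking becomes a no-op for the rest of the system's lifetime.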