From: Pekka Enberg
Date: Fri, 12 Jun 2009 12:58:59 +0000 (+0300)
Subject: slab: setup cpu caches later on when interrupts are enabled
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=8429db5c6336083594036c30f49401405d536911;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

slab: setup cpu caches later on when interrupts are enabled

Fixes the following boot-time warning:

[    0.000000] ------------[ cut here ]------------
[    0.000000] WARNING: at kernel/smp.c:369 smp_call_function_many+0x56/0x1bc()
[    0.000000] Hardware name:
[    0.000000] Modules linked in:
[    0.000000] Pid: 0, comm: swapper Not tainted 2.6.30 #492
[    0.000000] Call Trace:
[    0.000000]  [] ? _spin_unlock+0x4f/0x5c
[    0.000000]  [] ? smp_call_function_many+0x56/0x1bc
[    0.000000]  [] warn_slowpath_common+0x7c/0xa9
[    0.000000]  [] warn_slowpath_null+0x14/0x16
[    0.000000]  [] smp_call_function_many+0x56/0x1bc
[    0.000000]  [] ? do_ccupdate_local+0x0/0x54
[    0.000000]  [] ? do_ccupdate_local+0x0/0x54
[    0.000000]  [] smp_call_function+0x3d/0x68
[    0.000000]  [] ? do_ccupdate_local+0x0/0x54
[    0.000000]  [] on_each_cpu+0x31/0x7c
[    0.000000]  [] do_tune_cpucache+0x119/0x454
[    0.000000]  [] ? lockdep_init_map+0x94/0x10b
[    0.000000]  [] ? kmem_cache_init+0x421/0x593
[    0.000000]  [] enable_cpucache+0x68/0xad
[    0.000000]  [] kmem_cache_init+0x434/0x593
[    0.000000]  [] ? mem_init+0x156/0x161
[    0.000000]  [] start_kernel+0x1cc/0x3b9
[    0.000000]  [] x86_64_start_reservations+0xaa/0xae
[    0.000000]  [] x86_64_start_kernel+0xe1/0xe8
[    0.000000] ---[ end trace 4eaa2a86a8e2da22 ]---

The warning triggers because kmem_cache_init() runs while interrupts
are still disabled, yet resizing the head arrays via enable_cpucache()
ends up in on_each_cpu() and smp_call_function_many(), which must not
be called that early. Defer the resize to kmem_cache_init_late(),
which runs after interrupts have been enabled, and add a new EARLY
state to g_cpucache_up so that slab_is_available() already returns
true between the two init stages.

Cc: Christoph Lameter
Cc: Nick Piggin
Signed-off-by: Pekka Enberg
---

diff --git a/mm/slab.c b/mm/slab.c
index 453efcb1c980..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
 	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
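
--

Editor's note, not part of the patch: start_kernel() calls
kmem_cache_init() while interrupts are still disabled and, after
local_irq_enable(), kmem_cache_init_late(). The new EARLY state lets
slab_is_available() return true in between, so early callers can
already allocate while the per-cpu head arrays still have their
bootstrap sizes. Below is a minimal, standalone C sketch of that
state machine; the names mirror the patch, but this is illustrative
userspace code, not kernel code.

	#include <assert.h>
	#include <stdio.h>

	/* Mirrors g_cpucache_up from mm/slab.c after the patch. */
	static enum { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL } g_cpucache_up = NONE;

	/* After the patch the allocator counts as available from EARLY on. */
	static int slab_is_available(void)
	{
		return g_cpucache_up >= EARLY;
	}

	/* Stand-in for kmem_cache_init(): runs with interrupts disabled. */
	static void kmem_cache_init(void)
	{
		g_cpucache_up = EARLY;	/* usable, head arrays not yet resized */
	}

	/*
	 * Stand-in for kmem_cache_init_late(): runs with interrupts enabled,
	 * where enable_cpucache() may safely use smp_call_function_many().
	 */
	static void kmem_cache_init_late(void)
	{
		g_cpucache_up = FULL;
	}

	int main(void)
	{
		assert(!slab_is_available());
		kmem_cache_init();		/* early boot, IRQs off */
		assert(slab_is_available());	/* allocations already allowed */
		kmem_cache_init_late();		/* after local_irq_enable() */
		assert(g_cpucache_up == FULL);
		printf("EARLY/FULL ordering ok\n");
		return 0;
	}

The point of the >= EARLY comparison is that slab_is_available() stays
true across both stages, which is exactly what lets the head-array
resize move out of kmem_cache_init() without breaking early users.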