From: Pekka Enberg
Date: Thu, 4 Mar 2010 10:07:50 +0000 (+0200)
Subject: Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into...
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e2b093f3e9262353558c6f89510ab2d286b28287;p=GitHub%2Fexynos8895%2Fandroid_kernel_samsung_universal8895.git

Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into slab-for-linus
---

e2b093f3e9262353558c6f89510ab2d286b28287
diff --cc mm/slub.c
index 8d71aaf888d7,8d71aaf888d7,cab5288736c8,8d71aaf888d7,bd4a9e942ace..3525a4ec9794
--- a/mm/slub.c
+++ b/mm/slub.c
@@@@@@ -2095,130 -2095,130 -2099,130 -2095,130 -2062,24 +2066,24 @@@@@@ init_kmem_cache_node(struct kmem_cache_
      #endif
      }
---- #ifdef CONFIG_SMP
---- /*
----  * Per cpu array for per cpu structures.
----  *
----  * The per cpu array places all kmem_cache_cpu structures from one processor
----  * close together meaning that it becomes possible that multiple per cpu
----  * structures are contained in one cacheline. This may be particularly
----  * beneficial for the kmalloc caches.
----  *
----  * A desktop system typically has around 60-80 slabs. With 100 here we are
----  * likely able to get per cpu structures for all caches from the array defined
----  * here. We must be able to cover all kmalloc caches during bootstrap.
----  *
----  * If the per cpu array is exhausted then fall back to kmalloc
----  * of individual cachelines. No sharing is possible then.
----  */
---- #define NR_KMEM_CACHE_CPU 100
---- 
---- static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
----                 kmem_cache_cpu);
---- 
---- static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
---- static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
---- 
---- static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
----                         int cpu, gfp_t flags)
---- {
----         struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
---- 
----         if (c)
----                 per_cpu(kmem_cache_cpu_free, cpu) =
----                                 (void *)c->freelist;
----         else {
----                 /* Table overflow: So allocate ourselves */
----                 c = kmalloc_node(
----                         ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
----                         flags, cpu_to_node(cpu));
----                 if (!c)
----                         return NULL;
----         }
---- 
----         init_kmem_cache_cpu(s, c);
----         return c;
---- }
---- 
---- static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
---- {
----         if (c < per_cpu(kmem_cache_cpu, cpu) ||
----                         c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
----                 kfree(c);
----                 return;
----         }
----         c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
----         per_cpu(kmem_cache_cpu_free, cpu) = c;
---- }
---- 
---- static void free_kmem_cache_cpus(struct kmem_cache *s)
---- {
----         int cpu;
---- 
----         for_each_online_cpu(cpu) {
----                 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
---- 
----                 if (c) {
----                         s->cpu_slab[cpu] = NULL;
----                         free_kmem_cache_cpu(c, cpu);
----                 }
----         }
---- }
---- 
---- static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
---- {
----         int cpu;
---- 
----         for_each_online_cpu(cpu) {
----                 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-- - 
-- -                 if (c)
-- -                         continue;
-- - 
-- -                 c = alloc_kmem_cache_cpu(s, cpu, flags);
-- -                 if (!c) {
-- -                         free_kmem_cache_cpus(s);
-- -                         return 0;
-- -                 }
-- -                 s->cpu_slab[cpu] = c;
-- -         }
-- -         return 1;
-- - }
-- - 
-- - /*
-- -  * Initialize the per cpu array.
-- -  */
-- - static void init_alloc_cpu_cpu(int cpu)
-- - {
-- -         int i;
++++ static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
  -                  if (c)
  -                          continue;
  -  
  -                  c = alloc_kmem_cache_cpu(s, cpu, flags);
  -                  if (!c) {
  -                          free_kmem_cache_cpus(s);
  -                          return 0;
  -                  }
  -                  s->cpu_slab[cpu] = c;
  -          }
  -          return 1;
  -  }
  -  
  -  /*
  -   * Initialize the per cpu array.
  -   */
  -  static void init_alloc_cpu_cpu(int cpu)
  -  {
  -          int i;
  -  
----         if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
----                 return;
---- 
----         for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
----                 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
---- 
----         cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
---- }
---- 
---- static void __init init_alloc_cpu(void)
++++ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
     {
----         int cpu;
---- 
----         for_each_online_cpu(cpu)
----                 init_alloc_cpu_cpu(cpu);
---- }
++++         if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
++++                 /*
++++                  * Boot time creation of the kmalloc array. Use static per cpu data
++++                  * since the per cpu allocator is not available yet.
++++                  */
++++                 s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches);
++++         else
++++                 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
---- #else
---- static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
---- static inline void init_alloc_cpu(void) {}
++++         if (!s->cpu_slab)
++++                 return 0;
---- 
---- static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
---- {
----         init_kmem_cache_cpu(s, &s->cpu_slab);
             return 1;
     }
---- #endif
     #ifdef CONFIG_NUMA
     /*
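The part of this merge that comes from 'slub/percpu' replaces the hand-rolled per-cpu freelist array above with two cases in alloc_kmem_cache_cpus(): the statically defined boot-time kmalloc caches take slots in a reserved DEFINE_PER_CPU area (the per cpu allocator is not available that early), and every other cache simply calls alloc_percpu(). What follows is a minimal userspace C sketch of the same "static pool during early boot, dynamic allocator afterwards" pattern; it is an illustration only, and the names cache, cpu_slab, BOOT_CACHES, boot_cpu_slab, boot_caches and percpu_ready are invented for this example, not kernel identifiers.

/*
 * Illustrative userspace sketch, not kernel code: mirror the pattern used by
 * alloc_kmem_cache_cpus() in the diff above. Early "boot" caches take a slot
 * from a statically reserved per-CPU area; later caches use the dynamic
 * allocator. All identifiers below are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS         4
#define BOOT_CACHES     13      /* stand-in for KMALLOC_CACHES */

struct cpu_slab {
        void *freelist;
};

struct cache {
        const char *name;
        struct cpu_slab *cpu_slab;      /* NR_CPUS entries, one per CPU */
};

/* Statically reserved area usable before the dynamic allocator exists. */
static struct cpu_slab boot_cpu_slab[BOOT_CACHES][NR_CPUS];
static struct cache boot_caches[BOOT_CACHES];   /* stand-in for kmalloc_caches[] */
static int percpu_ready;        /* set once the "per cpu allocator" is up */

static int alloc_cache_cpus(struct cache *s)
{
        if (!percpu_ready && s >= boot_caches && s < boot_caches + BOOT_CACHES)
                /* Boot-time cache: hand out its matching static slot. */
                s->cpu_slab = boot_cpu_slab[s - boot_caches];
        else
                /* Normal case: ask the dynamic allocator. */
                s->cpu_slab = calloc(NR_CPUS, sizeof(struct cpu_slab));

        return s->cpu_slab != NULL;
}

int main(void)
{
        struct cache later = { .name = "later-cache" };

        boot_caches[0].name = "boot-cache-0";
        if (!alloc_cache_cpus(&boot_caches[0]))
                return 1;
        printf("%s uses the static area: %s\n", boot_caches[0].name,
               boot_caches[0].cpu_slab == boot_cpu_slab[0] ? "yes" : "no");

        percpu_ready = 1;
        if (!alloc_cache_cpus(&later))
                return 1;
        printf("%s is dynamically allocated: %s\n", later.name,
               later.cpu_slab != NULL ? "yes" : "no");
        free(later.cpu_slab);
        return 0;
}

The design point the sketch tries to capture is the one stated in the added kernel comment: the boot-time caches cannot rely on the per cpu allocator, so they are served from a fixed static area, while every later cache uses the general allocator and no per-cache freelist bookkeeping is needed at all.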