[PATCH] slab: extract cache_free_alien from __cache_free
author Pekka Enberg <penberg@cs.helsinki.fi>
Fri, 23 Jun 2006 09:03:05 +0000 (02:03 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Fri, 23 Jun 2006 14:42:46 +0000 (07:42 -0700)
Move alien object freeing to cache_free_alien() to reduce #ifdef clutter in
__cache_free().
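
The effect, condensed from the diff below (the elided body is the existing alien-cache logic; this is a sketch of the resulting structure, not the full code):

    #ifdef CONFIG_NUMA
    static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
    {
            if (likely(virt_to_slab(objp)->nodeid == numa_node_id()))
                    return 0;       /* local object: caller frees it as usual */
            /* ... push objp to the owning node's alien cache or node lists ... */
            return 1;               /* remote object: already handled here */
    }
    #else
    static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
    {
            return 0;               /* UMA build: every object is local */
    }
    #endif

so that __cache_free() only needs an unconditional

            if (cache_free_alien(cachep, objp))
                    return;

and the #ifdef stays confined to the helper definitions.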

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/slab.c

index f1b644eb39d816b3865400caf8d589d458756a17..bf05ea900ce87f5e41be6d3103d7a6f746c609dc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1024,6 +1024,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
                }
        }
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+       struct slab *slabp = virt_to_slab(objp);
+       int nodeid = slabp->nodeid;
+       struct kmem_list3 *l3;
+       struct array_cache *alien = NULL;
+
+       /*
+        * Make sure we are not freeing an object from another node to the array
+        * cache on this cpu.
+        */
+       if (likely(slabp->nodeid == numa_node_id()))
+               return 0;
+
+       l3 = cachep->nodelists[numa_node_id()];
+       STATS_INC_NODEFREES(cachep);
+       if (l3->alien && l3->alien[nodeid]) {
+               alien = l3->alien[nodeid];
+               spin_lock(&alien->lock);
+               if (unlikely(alien->avail == alien->limit)) {
+                       STATS_INC_ACOVERFLOW(cachep);
+                       __drain_alien_cache(cachep, alien, nodeid);
+               }
+               alien->entry[alien->avail++] = objp;
+               spin_unlock(&alien->lock);
+       } else {
+               spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+               free_block(cachep, &objp, 1, nodeid);
+               spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+       }
+       return 1;
+}
+
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
@@ -1038,6 +1072,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+       return 0;
+}
+
 #endif
 
 static int cpuup_callback(struct notifier_block *nfb,
@@ -3087,41 +3126,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        check_irq_off();
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-       /* Make sure we are not freeing a object from another
-        * node to the array cache on this cpu.
-        */
-#ifdef CONFIG_NUMA
-       {
-               struct slab *slabp;
-               slabp = virt_to_slab(objp);
-               if (unlikely(slabp->nodeid != numa_node_id())) {
-                       struct array_cache *alien = NULL;
-                       int nodeid = slabp->nodeid;
-                       struct kmem_list3 *l3;
-
-                       l3 = cachep->nodelists[numa_node_id()];
-                       STATS_INC_NODEFREES(cachep);
-                       if (l3->alien && l3->alien[nodeid]) {
-                               alien = l3->alien[nodeid];
-                               spin_lock(&alien->lock);
-                               if (unlikely(alien->avail == alien->limit)) {
-                                       STATS_INC_ACOVERFLOW(cachep);
-                                       __drain_alien_cache(cachep,
-                                                           alien, nodeid);
-                               }
-                               alien->entry[alien->avail++] = objp;
-                               spin_unlock(&alien->lock);
-                       } else {
-                               spin_lock(&(cachep->nodelists[nodeid])->
-                                         list_lock);
-                               free_block(cachep, &objp, 1, nodeid);
-                               spin_unlock(&(cachep->nodelists[nodeid])->
-                                           list_lock);
-                       }
-                       return;
-               }
-       }
-#endif
+       if (cache_free_alien(cachep, objp))
+               return;
+
        if (likely(ac->avail < ac->limit)) {
                STATS_INC_FREEHIT(cachep);
                ac->entry[ac->avail++] = objp;