#define SIZE_AC 1
#define SIZE_L3 (1 + MAX_NUMNODES)
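+/*
+ * Forward declarations: these helpers are referenced (from the CPU
+ * hotplug callback and the reap timer) before they are defined.
+ */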
+static int drain_freelist(struct kmem_cache *cache,
+ struct kmem_list3 *l3, int tofree);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+ int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+
/*
* This function must be completely optimized away if a constant is passed to
* it. Mostly the same as what is in linux/slab.h except it returns an index.
#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
-#define STATS_INC_REAPED(x) ((x)->reaped++)
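+/* cache_reap() now frees slabs in batches, so record the count in one go. */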
+#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
#define STATS_SET_HIGH(x) \
do { \
if ((x)->num_active > (x)->high_mark) \
#define STATS_DEC_ACTIVE(x) do { } while (0)
#define STATS_INC_ALLOCED(x) do { } while (0)
#define STATS_INC_GROWN(x) do { } while (0)
-#define STATS_INC_REAPED(x) do { } while (0)
+#define STATS_ADD_REAPED(x,y) do { } while (0)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
- int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-static int __node_shrink(struct kmem_cache *cachep, int node);
-
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
l3 = cachep->nodelists[node];
if (!l3)
continue;
- spin_lock_irq(&l3->list_lock);
- /* free slabs belonging to this node */
- __node_shrink(cachep, node);
- spin_unlock_irq(&l3->list_lock);
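+ /*
+ * Drain the entire freelist: l3->free_objects is an object
+ * count, always >= the number of free slabs.
+ */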
+ drain_freelist(cachep, l3, l3->free_objects);
}
mutex_unlock(&cache_chain_mutex);
break;
}
}
-static int __node_shrink(struct kmem_cache *cachep, int node)
+/*
+ * Remove slabs from the list of free slabs.
+ * Specify the number of slabs to drain in tofree.
+ *
+ * Returns the actual number of slabs released; this may be fewer than
+ * tofree if the list of free slabs empties first.
+ */
+static int drain_freelist(struct kmem_cache *cache,
+ struct kmem_list3 *l3, int tofree)
{
+ struct list_head *p;
+ int nr_freed;
struct slab *slabp;
- struct kmem_list3 *l3 = cachep->nodelists[node];
- int ret;
- for (;;) {
- struct list_head *p;
+ nr_freed = 0;
+ while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
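+ /*
+ * The list_empty() check in the loop condition runs without
+ * the lock, so re-check under list_lock: another CPU may
+ * have emptied the list in the meantime.
+ */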
+ spin_lock_irq(&l3->list_lock);
p = l3->slabs_free.prev;
- if (p == &l3->slabs_free)
- break;
+ if (p == &l3->slabs_free) {
+ spin_unlock_irq(&l3->list_lock);
+ goto out;
+ }
- slabp = list_entry(l3->slabs_free.prev, struct slab, list);
+ slabp = list_entry(p, struct slab, list);
#if DEBUG
BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
-
- l3->free_objects -= cachep->num;
+ /*
+ * Safe to drop the lock. The slab is no longer linked
+ * to the cache.
+ */
+ l3->free_objects -= cache->num;
spin_unlock_irq(&l3->list_lock);
- slab_destroy(cachep, slabp);
- spin_lock_irq(&l3->list_lock);
+ slab_destroy(cache, slabp);
+ nr_freed++;
}
- ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
- return ret;
+out:
+ return nr_freed;
}
static int __cache_shrink(struct kmem_cache *cachep)
check_irq_on();
for_each_online_node(i) {
l3 = cachep->nodelists[i];
- if (l3) {
- spin_lock_irq(&l3->list_lock);
- ret += __node_shrink(cachep, i);
- spin_unlock_irq(&l3->list_lock);
- }
+ if (!l3)
+ continue;
+
+ drain_freelist(cachep, l3, l3->free_objects);
+
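+ /* Shrinking failed if full or partial slabs remain on any node. */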
+ ret += !list_empty(&l3->slabs_full) ||
+ !list_empty(&l3->slabs_partial);
}
return (ret ? 1 : 0);
}
}
list_for_each_entry(searchp, &cache_chain, next) {
- struct list_head *p;
- int tofree;
- struct slab *slabp;
-
check_irq_on();
/*
drain_array(searchp, l3, l3->shared, 0, node);
- if (l3->free_touched) {
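+ /*
+ * free_touched is set when an allocation recently dipped into
+ * the freelist; skip one reap pass rather than shrink a busy
+ * cache.
+ */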
+ if (l3->free_touched)
l3->free_touched = 0;
- goto next;
- }
-
- tofree = (l3->free_limit + 5 * searchp->num - 1) /
- (5 * searchp->num);
- do {
- /*
- * Do not lock if there are no free blocks.
- */
- if (list_empty(&l3->slabs_free))
- break;
-
- spin_lock_irq(&l3->list_lock);
- p = l3->slabs_free.next;
- if (p == &(l3->slabs_free)) {
- spin_unlock_irq(&l3->list_lock);
- break;
- }
+ else {
+ int freed;
- slabp = list_entry(p, struct slab, list);
- BUG_ON(slabp->inuse);
- list_del(&slabp->list);
- STATS_INC_REAPED(searchp);
-
- /*
- * Safe to drop the lock. The slab is no longer linked
- * to the cache. searchp cannot disappear, we hold
- * cache_chain_lock
- */
- l3->free_objects -= searchp->num;
- spin_unlock_irq(&l3->list_lock);
- slab_destroy(searchp, slabp);
- } while (--tofree > 0);
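+ /*
+ * Drain roughly a fifth of free_limit per pass, converted
+ * from objects to whole slabs, rounding up.
+ */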
+ freed = drain_freelist(searchp, l3, (l3->free_limit +
+ 5 * searchp->num - 1) / (5 * searchp->num));
+ STATS_ADD_REAPED(searchp, freed);
+ }
next:
cond_resched();
}