#define MAX_OBJECTS_PER_SLAB 65535
/* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000 /* Poison object */
+#define __OBJECT_POISON		0x80000000 /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
/* Not all arches define cache_line_size */
 #ifndef cache_line_size
 #define cache_line_size()	L1_CACHE_BYTES
 #endif
}
#ifdef CONFIG_ZONE_DMA
+
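+/*
+ * sysfs_slab_add() may sleep, but dma caches can be created from
+ * atomic context. Such caches are flagged __SYSFS_ADD_DEFERRED and
+ * made visible in sysfs later from this work function, which runs
+ * in process context.
+ */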
+static void sysfs_add_func(struct work_struct *w)
+{
+	struct kmem_cache *s;
+
+	down_write(&slub_lock);
+	list_for_each_entry(s, &slab_caches, list) {
+		if (s->flags & __SYSFS_ADD_DEFERRED) {
+			s->flags &= ~__SYSFS_ADD_DEFERRED;
+			sysfs_slab_add(s);
+		}
+	}
+	up_write(&slub_lock);
+}
+
+static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
+
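+/*
+ * Reached from the kmalloc() lookup path when a SLUB_DMA (GFP_DMA)
+ * allocation finds no dma cache for its size index yet.
+ */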
static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
 	struct kmem_cache *s;
-	struct kmem_cache *x;
 	char *text;
 	size_t realsize;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
 		return s;
 	/* Dynamically create dma cache */
-	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	if (!x)
-		panic("Unable to allocate memory for dma cache\n");
+	if (flags & __GFP_WAIT)
+		down_write(&slub_lock);
+	else {
+		if (!down_write_trylock(&slub_lock))
+			goto out;
+	}
+
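+	/* Recheck under slub_lock: another cpu may have created the cache. */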
+	if (kmalloc_caches_dma[index])
+		goto unlock_out;
 	realsize = kmalloc_caches[index].objsize;
-	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-			(unsigned int)realsize);
-	s = create_kmalloc_cache(x, text, realsize, flags);
-	down_write(&slub_lock);
-	if (!kmalloc_caches_dma[index]) {
-		kmalloc_caches_dma[index] = s;
-		up_write(&slub_lock);
-		return s;
+	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+			(unsigned int)realsize);
+	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+	if (!s || !text || !kmem_cache_open(s, flags, text,
+			realsize, ARCH_KMALLOC_MINALIGN,
+			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+		kfree(s);
+		kfree(text);
+		goto unlock_out;
 	}
+
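+	/*
+	 * Publish the cache. Its sysfs entry is added later by
+	 * sysfs_add_work, once it is safe to sleep.
+	 */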
+	list_add(&s->list, &slab_caches);
+	kmalloc_caches_dma[index] = s;
+
+	schedule_work(&sysfs_add_work);
+
+unlock_out:
 	up_write(&slub_lock);
-	kmem_cache_destroy(s);
+out:
 	return kmalloc_caches_dma[index];
}
#endif