--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ ... @@
 			break;
 
 		need_loop = 1;
-		e = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+		e = kmem_cache_zalloc(btrfs_free_space_cachep,
+				      GFP_NOFS);
 		if (!e) {
 			kunmap(page);
 			unlock_page(page);
@@ ... @@
 		e->bytes = le64_to_cpu(entry->bytes);
 		if (!e->bytes) {
 			kunmap(page);
-			kfree(e);
+			kmem_cache_free(btrfs_free_space_cachep, e);
 			unlock_page(page);
 			page_cache_release(page);
 			goto free_cache;
@@ ... @@
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
 			if (!e->bitmap) {
 				kunmap(page);
-				kfree(e);
+				kmem_cache_free(
+						btrfs_free_space_cachep, e);
 				unlock_page(page);
 				page_cache_release(page);
 				goto free_cache;
@@ ... @@
 {
 	unlink_free_space(block_group, bitmap_info);
 	kfree(bitmap_info->bitmap);
-	kfree(bitmap_info);
+	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
 	block_group->total_bitmaps--;
 	recalculate_thresholds(block_group);
 }
@@ ... @@
 		/* no pre-allocated info, allocate a new one */
 		if (!info) {
-			info = kzalloc(sizeof(struct btrfs_free_space),
-				       GFP_NOFS);
+			info = kmem_cache_zalloc(btrfs_free_space_cachep,
+						 GFP_NOFS);
 			if (!info) {
 				spin_lock(&block_group->tree_lock);
 				ret = -ENOMEM;
@@ ... @@
 	if (info) {
 		if (info->bitmap)
 			kfree(info->bitmap);
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 	}
 
 	return ret;
@@ ... @@
 		else
 			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
-		kfree(right_info);
+		kmem_cache_free(btrfs_free_space_cachep, right_info);
 		merged = true;
 	}
@@ ... @@
 			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
-		kfree(left_info);
+		kmem_cache_free(btrfs_free_space_cachep, left_info);
 		merged = true;
 	}
@@ ... @@
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
 	if (!info)
 		return -ENOMEM;
@@ ... @@
 link:
 	ret = link_free_space(block_group, info);
 	if (ret)
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 out:
 	spin_unlock(&block_group->tree_lock);
@@ ... @@
 				kfree(info->bitmap);
 				block_group->total_bitmaps--;
 			}
-			kfree(info);
+			kmem_cache_free(btrfs_free_space_cachep, info);
 			goto out_lock;
 		}
@@ ... @@
 			/* the hole we're creating ends at the end
 			 * of the info struct, just free the info
 			 */
-			kfree(info);
+			kmem_cache_free(btrfs_free_space_cachep, info);
 		}
 		spin_unlock(&block_group->tree_lock);
@@ ... @@
 		unlink_free_space(block_group, info);
 		if (info->bitmap)
 			kfree(info->bitmap);
-		kfree(info);
+		kmem_cache_free(btrfs_free_space_cachep, info);
 		if (need_resched()) {
 			spin_unlock(&block_group->tree_lock);
 			cond_resched();
@@ ... @@
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 		if (!entry->bytes)
-			kfree(entry);
+			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
 			link_free_space(block_group, entry);
 	}
@@ ... @@
 	block_group->free_space -= bytes;
 	if (entry->bytes == 0) {
 		block_group->free_extents--;
-		kfree(entry);
+		kmem_cache_free(btrfs_free_space_cachep, entry);
 	}
 
 	spin_unlock(&block_group->tree_lock);
#include "tree-log.h"
#include "compression.h"
#include "locking.h"
+#include "free-space-cache.h"
struct btrfs_iget_args {
u64 ino;
@@ ... @@
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_transaction_cachep;
 struct kmem_cache *btrfs_path_cachep;
+struct kmem_cache *btrfs_free_space_cachep;
 
 #define S_SHIFT 12
 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ ... @@
 		kmem_cache_destroy(btrfs_transaction_cachep);
 	if (btrfs_path_cachep)
 		kmem_cache_destroy(btrfs_path_cachep);
+	if (btrfs_free_space_cachep)
+		kmem_cache_destroy(btrfs_free_space_cachep);
 }
 
 int btrfs_init_cachep(void)
@@ ... @@
 	if (!btrfs_path_cachep)
 		goto fail;
 
+	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+			sizeof(struct btrfs_free_space), 0,
+			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_free_space_cachep)
+		goto fail;
+
 	return 0;
 fail:
 	btrfs_destroy_cachep();
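
The conversion itself is mechanical: every kzalloc()/kfree() of a
struct btrfs_free_space becomes kmem_cache_zalloc()/kmem_cache_free()
against a dedicated slab, created once in btrfs_init_cachep() and torn
down in btrfs_destroy_cachep(). A private kmem_cache suits small,
fixed-size objects that are allocated and freed constantly: the slab
packs them tightly and recycles them cheaply, whereas kmalloc rounds
each allocation up to its size class. Note that the page-sized bitmap
buffers stay on kzalloc()/kfree(); only the fixed-size entry structs
move to the slab. What follows is a minimal, self-contained sketch of
that create/zalloc/free/destroy lifecycle as a standalone module;
struct demo_entry and the demo_* names are illustrative placeholders
rather than Btrfs code, and the slab flags simply mirror the ones used
above.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative stand-in for a small, fixed-size, high-churn object. */
struct demo_entry {
	u64 offset;
	u64 bytes;
};

static struct kmem_cache *demo_cachep;

static int __init demo_init(void)
{
	struct demo_entry *e;

	/* One slab dedicated to demo_entry objects, same flags as above. */
	demo_cachep = kmem_cache_create("demo_entry_cache",
			sizeof(struct demo_entry), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!demo_cachep)
		return -ENOMEM;

	/* Hot-path pattern: zeroed allocation, then free, from the slab. */
	e = kmem_cache_zalloc(demo_cachep, GFP_NOFS);
	if (!e) {
		kmem_cache_destroy(demo_cachep);
		return -ENOMEM;
	}
	e->bytes = 4096;
	kmem_cache_free(demo_cachep, e);

	return 0;
}

static void __exit demo_exit(void)
{
	/* Every object must be freed back before the cache is destroyed. */
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");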