Btrfs: reduce size of struct extent_state
author: Filipe Manana <fdmanana@suse.com>
Sun, 6 Jul 2014 19:09:59 +0000 (20:09 +0100)
committer: Chris Mason <clm@fb.com>
Wed, 17 Sep 2014 20:37:30 +0000 (13:37 -0700)
The tree field of struct extent_state was only used to figure out if
an extent state was connected to an inode's io tree or not. For this
we can just use the rb_node field itself.

On an x86_64 system with this change the sizeof(struct extent_state) is
reduced from 96 bytes down to 88 bytes, meaning that with a page size
of 4096 bytes we can now store 46 extent states per page instead of 42.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h

index 1009fa8a08ef19918f95fd5243460bd6bf0da818..816e80e678bda7a27b9311f1bb22657fe58c02c4 100644 (file)
@@ -25,6 +25,11 @@ static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 static struct bio_set *btrfs_bioset;
 
+static inline bool extent_state_in_tree(const struct extent_state *state)
+{
+       return !RB_EMPTY_NODE(&state->rb_node);
+}
+
 #ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
@@ -59,9 +64,9 @@ void btrfs_leak_debug_check(void)
 
        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
-               printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
-                      "state %lu in tree %p refs %d\n",
-                      state->start, state->end, state->state, state->tree,
+               pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
+                      state->start, state->end, state->state,
+                      extent_state_in_tree(state),
                       atomic_read(&state->refs));
                list_del(&state->leak_list);
                kmem_cache_free(extent_state_cache, state);
@@ -209,7 +214,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
                return state;
        state->state = 0;
        state->private = 0;
-       state->tree = NULL;
+       RB_CLEAR_NODE(&state->rb_node);
        btrfs_leak_debug_add(&state->leak_list, &states);
        atomic_set(&state->refs, 1);
        init_waitqueue_head(&state->wq);
@@ -222,7 +227,7 @@ void free_extent_state(struct extent_state *state)
        if (!state)
                return;
        if (atomic_dec_and_test(&state->refs)) {
-               WARN_ON(state->tree);
+               WARN_ON(extent_state_in_tree(state));
                btrfs_leak_debug_del(&state->leak_list);
                trace_free_extent_state(state, _RET_IP_);
                kmem_cache_free(extent_state_cache, state);
@@ -371,8 +376,8 @@ static void merge_state(struct extent_io_tree *tree,
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->start = other->start;
-                       other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
+                       RB_CLEAR_NODE(&other->rb_node);
                        free_extent_state(other);
                }
        }
@@ -383,8 +388,8 @@ static void merge_state(struct extent_io_tree *tree,
                    other->state == state->state) {
                        merge_cb(tree, state, other);
                        state->end = other->end;
-                       other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
+                       RB_CLEAR_NODE(&other->rb_node);
                        free_extent_state(other);
                }
        }
@@ -442,7 +447,6 @@ static int insert_state(struct extent_io_tree *tree,
                       found->start, found->end, start, end);
                return -EEXIST;
        }
-       state->tree = tree;
        merge_state(tree, state);
        return 0;
 }
@@ -486,7 +490,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                free_extent_state(prealloc);
                return -EEXIST;
        }
-       prealloc->tree = tree;
        return 0;
 }
 
@@ -524,9 +527,9 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
                wake_up(&state->wq);
        if (state->state == 0) {
                next = next_state(state);
-               if (state->tree) {
+               if (extent_state_in_tree(state)) {
                        rb_erase(&state->rb_node, &tree->state);
-                       state->tree = NULL;
+                       RB_CLEAR_NODE(&state->rb_node);
                        free_extent_state(state);
                } else {
                        WARN_ON(1);
@@ -606,8 +609,8 @@ again:
                        cached_state = NULL;
                }
 
-               if (cached && cached->tree && cached->start <= start &&
-                   cached->end > start) {
+               if (cached && extent_state_in_tree(cached) &&
+                   cached->start <= start && cached->end > start) {
                        if (clear)
                                atomic_dec(&cached->refs);
                        state = cached;
@@ -843,7 +846,7 @@ again:
        if (cached_state && *cached_state) {
                state = *cached_state;
                if (state->start <= start && state->end > start &&
-                   state->tree) {
+                   extent_state_in_tree(state)) {
                        node = &state->rb_node;
                        goto hit_next;
                }
@@ -1069,7 +1072,7 @@ again:
        if (cached_state && *cached_state) {
                state = *cached_state;
                if (state->start <= start && state->end > start &&
-                   state->tree) {
+                   extent_state_in_tree(state)) {
                        node = &state->rb_node;
                        goto hit_next;
                }
@@ -1459,7 +1462,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
        spin_lock(&tree->lock);
        if (cached_state && *cached_state) {
                state = *cached_state;
-               if (state->end == start - 1 && state->tree) {
+               if (state->end == start - 1 && extent_state_in_tree(state)) {
                        n = rb_next(&state->rb_node);
                        while (n) {
                                state = rb_entry(n, struct extent_state,
@@ -1905,7 +1908,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
        int bitset = 0;
 
        spin_lock(&tree->lock);
-       if (cached && cached->tree && cached->start <= start &&
+       if (cached && extent_state_in_tree(cached) && cached->start <= start &&
            cached->end > start)
                node = &cached->rb_node;
        else
index ccc264e7bde12760035dacfd40cafb25b663de4e..1f608166fe6703c9af3ea8d5e55ca5393aca366e 100644 (file)
@@ -108,7 +108,6 @@ struct extent_state {
        struct rb_node rb_node;
 
        /* ADD NEW ELEMENTS AFTER THIS */
-       struct extent_io_tree *tree;
        wait_queue_head_t wq;
        atomic_t refs;
        unsigned long state;