Btrfs: record first logical byte in memory
author	Liu Bo <bo.li.liu@oracle.com>
Thu, 27 Dec 2012 09:01:23 +0000 (09:01 +0000)
committer	Josef Bacik <jbacik@fusionio.com>
Wed, 20 Feb 2013 14:37:18 +0000 (09:37 -0500)
Record the first logical byte (the lowest block group objectid) in fs_info. This'd save us an rbtree search, which may become expensive on a large filesystem.

Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
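
For context, the pattern introduced by this patch is a cached minimum key kept next to an ordered structure: it is lowered when a smaller key is inserted, invalidated when the block group holding the minimum is removed, and re-seeded lazily on the next lookup. Below is a minimal userspace sketch of that idea, not btrfs code: the names (struct bg_cache, struct bg_node, bg_insert, bg_remove_min, bg_first_logical_byte) are made up for illustration, a plain unbalanced binary search tree stands in for the kernel rbtree of block groups, and locking is omitted.

/*
 * Standalone sketch of the caching pattern (illustrative only, not kernel
 * code): keep the smallest key of an ordered tree in a side variable so the
 * common "what is the first logical byte?" query avoids walking the tree.
 */
#include <stdint.h>
#include <stdio.h>

struct bg_node {			/* stand-in for a block group entry */
	uint64_t objectid;		/* start offset of the block group */
	struct bg_node *left, *right;
};

struct bg_cache {			/* stand-in for the fs_info fields */
	struct bg_node *root;
	uint64_t first_logical_byte;	/* (u64)-1 means "unknown" */
};

static void bg_cache_init(struct bg_cache *c)
{
	c->root = NULL;
	c->first_logical_byte = UINT64_MAX;
}

/* Insert a block group and keep the cached minimum up to date. */
static void bg_insert(struct bg_cache *c, struct bg_node *n)
{
	struct bg_node **p = &c->root;

	while (*p)
		p = (n->objectid < (*p)->objectid) ? &(*p)->left : &(*p)->right;
	n->left = n->right = NULL;
	*p = n;

	if (c->first_logical_byte > n->objectid)
		c->first_logical_byte = n->objectid;
}

/* Remove the lowest block group; if it held the cached minimum, invalidate. */
static void bg_remove_min(struct bg_cache *c)
{
	struct bg_node **p = &c->root;

	if (!*p)
		return;
	while ((*p)->left)
		p = &(*p)->left;
	if (c->first_logical_byte == (*p)->objectid)
		c->first_logical_byte = UINT64_MAX;
	*p = (*p)->right;
}

/* Fast path: return the cached value; fall back to a tree walk and re-seed. */
static uint64_t bg_first_logical_byte(struct bg_cache *c)
{
	struct bg_node *n = c->root;

	if (c->first_logical_byte < UINT64_MAX)
		return c->first_logical_byte;
	if (!n)
		return 0;
	while (n->left)
		n = n->left;
	c->first_logical_byte = n->objectid;
	return n->objectid;
}

int main(void)
{
	struct bg_cache c;
	struct bg_node a = { .objectid = 1024 * 1024 };
	struct bg_node b = { .objectid = 12 * 1024 * 1024 };

	bg_cache_init(&c);
	bg_insert(&c, &b);
	bg_insert(&c, &a);
	printf("first logical byte: %llu\n",
	       (unsigned long long)bg_first_logical_byte(&c)); /* 1048576, from the cache */
	bg_remove_min(&c);					 /* cache invalidated */
	printf("first logical byte: %llu\n",
	       (unsigned long long)bg_first_logical_byte(&c)); /* 12582912, via tree walk */
	return 0;
}

Invalidating rather than recomputing on removal keeps the removal path cheap; the next lookup pays for one tree walk and re-seeds the cache, which mirrors what the patch does: btrfs_remove_block_group() resets first_logical_byte to (u64)-1, and block_group_cache_tree_search() re-seeds it when called with bytenr == 0.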
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 22f012d41fd030b28b18a2961a33d84127faebea..9ee099f3f8347003e753dc2aef4ab370ef5e6986 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1250,6 +1250,7 @@ struct btrfs_fs_info {
 
        /* block group cache stuff */
        spinlock_t block_group_cache_lock;
+       u64 first_logical_byte;
        struct rb_root block_group_cache_tree;
 
        /* keep track of unallocated space */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1db8a99388293d7699a7c2f8c46cc14af1e759a7..04f98e3ffd90e6c6e0bc40f49b7ebdfdeccd0504 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2130,6 +2130,7 @@ int open_ctree(struct super_block *sb,
 
        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT;
+       fs_info->first_logical_byte = (u64)-1;
 
        extent_io_tree_init(&fs_info->freed_extents[0],
                             fs_info->btree_inode->i_mapping);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 825f23b13b584da158fa1bebd6a101a2e637758a..82400b2b2517c5950832c0aa74c002918164b6ac 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -161,6 +161,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
+
+       if (info->first_logical_byte > block_group->key.objectid)
+               info->first_logical_byte = block_group->key.objectid;
+
        spin_unlock(&info->block_group_cache_lock);
 
        return 0;
@@ -202,8 +206,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                        break;
                }
        }
-       if (ret)
+       if (ret) {
                btrfs_get_block_group(ret);
+               if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+                       info->first_logical_byte = ret->key.objectid;
+       }
        spin_unlock(&info->block_group_cache_lock);
 
        return ret;
@@ -4848,6 +4855,13 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
        struct btrfs_block_group_cache *cache;
        u64 bytenr;
 
+       spin_lock(&root->fs_info->block_group_cache_lock);
+       bytenr = root->fs_info->first_logical_byte;
+       spin_unlock(&root->fs_info->block_group_cache_lock);
+
+       if (bytenr < (u64)-1)
+               return bytenr;
+
        cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
        if (!cache)
                return 0;
@@ -8059,6 +8073,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
+
+       if (root->fs_info->first_logical_byte == block_group->key.objectid)
+               root->fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&root->fs_info->block_group_cache_lock);
 
        down_write(&block_group->space_info->groups_sem);