Btrfs: reserve space for block groups
author	Josef Bacik <jbacik@fb.com>
Wed, 18 Feb 2015 16:06:57 +0000 (08:06 -0800)
committer	Chris Mason <clm@fb.com>
Fri, 10 Apr 2015 21:06:48 +0000 (14:06 -0700)
This changes our delayed refs calculations to include the space needed
to write back dirty block groups.
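For illustration, the reservation math after this change can be modeled with a small
standalone program. This is only a sketch, not kernel code: calc_trans_metadata_size()
below is a rough stand-in for btrfs_calc_trans_metadata_size(), the csum cost is passed
in directly as a leaf count instead of going through btrfs_csum_bytes_to_leaves(), and
the constants and helper names are illustrative.

	#include <stdio.h>
	#include <stdint.h>

	#define NODESIZE   16384ULL   /* a common btrfs nodesize, chosen arbitrarily here */
	#define MAX_LEVEL  8          /* mirrors BTRFS_MAX_LEVEL */

	/* rough stand-in for btrfs_calc_trans_metadata_size(): worst-case cost per item */
	static uint64_t calc_trans_metadata_size(uint64_t num_items)
	{
		return NODESIZE * 2 * MAX_LEVEL * num_items;
	}

	/*
	 * Model of btrfs_check_space_for_delayed_refs() after this patch:
	 * returns 1 when the global reserve looks too small to run the pending
	 * delayed refs *and* write back the dirty block groups.
	 */
	static int check_space_for_delayed_refs(uint64_t reserved, uint64_t num_heads,
						uint64_t csum_leaves,
						uint64_t num_dirty_bgs,
						int space_info_full)
	{
		uint64_t num_bytes = calc_trans_metadata_size(1);
		uint64_t num_dirty_bgs_bytes;

		if (num_heads > 1)
			num_bytes += (num_heads - 1) * NODESIZE;
		num_bytes <<= 1;
		num_bytes += csum_leaves * NODESIZE;

		/* new in this patch: cost of writing back every dirty block group */
		num_dirty_bgs_bytes = calc_trans_metadata_size(num_dirty_bgs);

		if (space_info_full) {
			num_bytes <<= 1;
			num_dirty_bgs_bytes <<= 1;
		}

		return reserved <= num_bytes + num_dirty_bgs_bytes;
	}

	int main(void)
	{
		/* 512MiB reserved, 32 ref heads, 4 csum leaves, 16 dirty block groups */
		printf("need to flush: %d\n",
		       check_space_for_delayed_refs(512ULL << 20, 32, 4, 16, 0));
		return 0;
	}

The only difference from the pre-patch check is the num_dirty_bgs_bytes term in the
final comparison, which is what this commit adds.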

Signed-off-by: Chris Mason <clm@fb.com>
fs/btrfs/extent-tree.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 75f4bed6e6dbc0a1d974d7cbbbfb9604ce64bc5e..ae8db3ba467cf6eba74a31cce2d952d8e51a67f7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2657,7 +2657,8 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *global_rsv;
        u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
        u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
-       u64 num_bytes;
+       u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
+       u64 num_bytes, num_dirty_bgs_bytes;
        int ret = 0;
 
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
@@ -2666,17 +2667,21 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
                num_bytes += (num_heads - 1) * root->nodesize;
        num_bytes <<= 1;
        num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
+       num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
+                                                            num_dirty_bgs);
        global_rsv = &root->fs_info->global_block_rsv;
 
        /*
         * If we can't allocate any more chunks lets make sure we have _lots_ of
         * wiggle room since running delayed refs can create more delayed refs.
         */
-       if (global_rsv->space_info->full)
+       if (global_rsv->space_info->full) {
+               num_dirty_bgs_bytes <<= 1;
                num_bytes <<= 1;
+       }
 
        spin_lock(&global_rsv->lock);
-       if (global_rsv->reserved <= num_bytes)
+       if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
                ret = 1;
        spin_unlock(&global_rsv->lock);
        return ret;
@@ -5408,6 +5413,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                if (list_empty(&cache->dirty_list)) {
                        list_add_tail(&cache->dirty_list,
                                      &trans->transaction->dirty_bgs);
+                       trans->transaction->num_dirty_bgs++;
                        btrfs_get_block_group(cache);
                }
                spin_unlock(&trans->transaction->dirty_bgs_lock);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8b9eea8f2406a4de2734c37592a3bdf4d29d91fa..234d6063bbf3af431373f07ac92a422d6ce5cf72 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -251,6 +251,7 @@ loop:
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->pending_ordered);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+       cur_trans->num_dirty_bgs = 0;
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 96b189b8898aa17dc2f9239d71b9964f8b70f58d..4cb0ae264534dada0a0266d3ea05b5975206c0f0 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -64,6 +64,7 @@ struct btrfs_transaction {
        struct list_head pending_ordered;
        struct list_head switch_commits;
        struct list_head dirty_bgs;
+       u64 num_dirty_bgs;
        spinlock_t dirty_bgs_lock;
        struct btrfs_delayed_ref_root delayed_refs;
        int aborted;