From: Chris Mason
Date: Thu, 31 Jul 2008 14:48:37 +0000 (-0400)
Subject: Btrfs: Throttle tuning
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=37d1aeee3990385e9bb436c50c2f7e120a668df6;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

Btrfs: Throttle tuning

This avoids waiting for transactions with pages locked by breaking out
the code that waits for the current transaction to close into a
function called by btrfs_throttle.

It also lowers the limits for where we start throttling.

Signed-off-by: Chris Mason
---

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4765248000fd..b003b4364ddb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2526,6 +2526,9 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 				*level = 0;
 				break;
 			}
+			if (printk_ratelimit())
+				printk("leaf ref miss for bytenr %llu\n",
+				       (unsigned long long)bytenr);
 		}
 		next = btrfs_find_tree_block(root, bytenr, blocksize);
 		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 388ac397c2c8..d3f2fe0b7c6c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -253,7 +253,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 
 	end_of_last_block = start_pos + num_bytes - 1;
 	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_join_transaction(root, 1);
 	if (!trans) {
 		err = -ENOMEM;
 		goto out_unlock;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 52c5524896a3..66af5140c8ce 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -124,17 +124,12 @@ static noinline int record_root_in_trans(struct btrfs_root *root)
 	return 0;
 }
 
-struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-					     int num_blocks, int join)
+static void wait_current_trans(struct btrfs_root *root)
 {
-	struct btrfs_trans_handle *h =
-		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
 	struct btrfs_transaction *cur_trans;
-	int ret;
 
-	mutex_lock(&root->fs_info->trans_mutex);
 	cur_trans = root->fs_info->running_transaction;
-	if (cur_trans && cur_trans->blocked && !join) {
+	if (cur_trans && cur_trans->blocked) {
 		DEFINE_WAIT(wait);
 		cur_trans->use_count++;
 		while(1) {
@@ -154,6 +149,18 @@ struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 		}
 		put_transaction(cur_trans);
 	}
+}
+
+struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
+					     int num_blocks, int join)
+{
+	struct btrfs_trans_handle *h =
+		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
+	int ret;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	if (!join)
+		wait_current_trans(root);
 
 	ret = join_transaction(root);
 	BUG_ON(ret);
@@ -200,7 +207,7 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 	return 0;
 }
 
-void btrfs_throttle(struct btrfs_root *root)
+static void throttle_on_drops(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *info = root->fs_info;
 
@@ -223,19 +230,28 @@ harder:
 		} while (thr == atomic_read(&info->throttle_gen));
 
 		if (harder_count < 5 &&
-		    info->total_ref_cache_size > 5 * 1024 * 1024) {
+		    info->total_ref_cache_size > 1 * 1024 * 1024) {
 			harder_count++;
 			goto harder;
 		}
 
 		if (harder_count < 10 &&
-		    info->total_ref_cache_size > 10 * 1024 * 1024) {
+		    info->total_ref_cache_size > 5 * 1024 * 1024) {
 			harder_count++;
 			goto harder;
 		}
 	}
 }
 
+void btrfs_throttle(struct btrfs_root *root)
+{
+	mutex_lock(&root->fs_info->trans_mutex);
+	wait_current_trans(root);
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	throttle_on_drops(root);
+}
+
 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root, int throttle)
 {
@@ -256,7 +272,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
 	if (throttle)
-		btrfs_throttle(root);
+		throttle_on_drops(root);
 
 	return 0;
 }
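
For reference, the control flow introduced above can be reduced to the following user-space toy model. This is only a sketch of the idea, not kernel code: everything prefixed with toy_ is an illustrative stand-in, the waiting and throttling bodies are simplified to printf calls, and the real definitions are the ones in the diff. It shows the two paths the patch separates: writers that already hold page locks join the running transaction instead of waiting for a blocked one, while btrfs_throttle() does the transaction wait itself and then backs off on the drop-related reference cache.

/*
 * Toy model of the post-patch flow (illustration only, not kernel code).
 * Build with: cc -Wall toy_throttle.c -o toy_throttle
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_fs_info {
	bool transaction_blocked;	/* stands in for cur_trans->blocked */
	unsigned long ref_cache_size;	/* stands in for total_ref_cache_size */
};

/* stands in for wait_current_trans(): wait out a blocked transaction */
static void toy_wait_current_trans(struct toy_fs_info *info)
{
	if (info->transaction_blocked) {
		printf("waiting for the blocked transaction to close\n");
		info->transaction_blocked = false;
	}
}

/* stands in for throttle_on_drops(): back off while the ref cache is large,
 * using the lowered 1MB/5MB limits from this patch */
static void toy_throttle_on_drops(struct toy_fs_info *info)
{
	if (info->ref_cache_size > 5 * 1024 * 1024)
		printf("throttling harder (ref cache > 5MB)\n");
	else if (info->ref_cache_size > 1 * 1024 * 1024)
		printf("throttling (ref cache > 1MB)\n");
}

/* stands in for start_transaction(): only non-joining callers wait */
static void toy_start_transaction(struct toy_fs_info *info, bool join)
{
	if (!join)
		toy_wait_current_trans(info);
	printf("transaction started (join=%d)\n", join);
}

/* stands in for btrfs_throttle(): wait for the transaction, then throttle */
static void toy_btrfs_throttle(struct toy_fs_info *info)
{
	toy_wait_current_trans(info);
	toy_throttle_on_drops(info);
}

int main(void)
{
	struct toy_fs_info info = {
		.transaction_blocked = true,
		.ref_cache_size = 2 * 1024 * 1024,
	};

	/* a writer holding page locks joins instead of waiting */
	toy_start_transaction(&info, true);

	/* throttling paths wait and back off without page locks held */
	toy_btrfs_throttle(&info);
	return 0;
}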