This patch adds a bool parameter, need, to f2fs_balance_fs() so that the condition each caller used to open-code around the call is passed in and checked once inside the function, cleaning up all of its callers.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
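
As a minimal, self-contained userspace sketch of the calling-convention change (not part of the patch; the struct contents and the printf body are placeholders, not f2fs code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the kernel type, for illustration only. */
	struct f2fs_sb_info { int dummy; };

	/* New-style signature: the caller's condition becomes an argument,
	 * and the fast path (need == false) is a single early return. */
	static void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
	{
		if (!need)
			return;
		/* ... GC / checkpoint work would happen here in the kernel ... */
		printf("balancing\n");
	}

	int main(void)
	{
		struct f2fs_sb_info sbi = { 0 };
		bool node_changed = true;

		/* Before the patch, every caller open-coded:
		 *	if (node_changed)
		 *		f2fs_balance_fs(&sbi);
		 * After it, the condition is simply passed in:
		 */
		f2fs_balance_fs(&sbi, node_changed);

		/* Call sites that always need balancing pass true. */
		f2fs_balance_fs(&sbi, true);
		return 0;
	}

Folding the check into the callee removes a two-line conditional from every call site while keeping the no-op case cheap, which is exactly the shape of the hunks that follow.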
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
return err;
f2fs_put_dnode(&dn);
out:
f2fs_unlock_op(sbi);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
return err;
}
if (create) {
f2fs_unlock_op(sbi);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
f2fs_lock_op(sbi);
}
unlock_out:
if (create) {
f2fs_unlock_op(sbi);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
out:
trace_f2fs_map_blocks(inode, map, err);
if (err)
ClearPageUptodate(page);
unlock_page(page);
- if (need_balance_fs)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, need_balance_fs);
if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_bio(sbi, DATA, WRITE);
remove_dirty_inode(inode);
if (need_balance && has_not_enough_free_secs(sbi, 0)) {
unlock_page(page);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
*/
void register_inmem_page(struct inode *, struct page *);
int commit_inmem_pages(struct inode *, bool);
-void f2fs_balance_fs(struct f2fs_sb_info *);
+void f2fs_balance_fs(struct f2fs_sb_info *, bool);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
file_update_time(vma->vm_file);
lock_page(page);
err = f2fs_truncate(inode, true);
if (err)
return err;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
} else {
/*
* do not trim all blocks after i_size if target size is
if (!len)
return 0;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
page = get_new_data_page(inode, NULL, index, false);
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
int ret = 0;
for (; end < nrpages; start++, end++) {
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
ret = __exchange_data_block(inode, end, start, true);
f2fs_unlock_op(sbi);
if (ret)
return ret;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
if (ret)
return ret;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
pg_start = range->start >> PAGE_CACHE_SHIFT;
pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
mutex_lock(&inode->i_mutex);
f2fs_put_page(page, 1);
- if (dn.node_changed)
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
return err;
}
* during the urgent cleaning time when running out of free sections.
*/
if (update_inode_page(inode))
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
return 0;
}
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
!f2fs_is_child_context_consistent_with_parent(dir, inode))
return -EPERM;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
inode->i_ctime = CURRENT_TIME;
ihold(inode);
return 0;
}
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
if (!de)
goto fail;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
inode->i_op = &f2fs_symlink_inode_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
f2fs_lock_op(sbi);
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
}
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
if (!new_entry)
goto out_whiteout;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
update_inode_page(old_inode);
update_inode_page(new_inode);
} else {
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
goto out_new_dir;
}
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
* inode becomes free by iget_locked in f2fs_iget.
*/
if (!abort) {
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
}
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
+ if (!need)
+ return;
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
if (ipage)
return __f2fs_setxattr(inode, index, name, value,
size, ipage, flags);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
/* protect xattr_ver */