struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff, written;
/* collect a number of dirty meta pages and write together */
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
+ blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+ blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
+ struct blk_plug plug;
int err = 0;
+ blk_start_plug(&plug);
+
retry_flush_dents:
f2fs_lock_all(sbi);
/* write all the dirty dentry pages */
goto retry_flush_nodes;
}
out:
+ blk_finish_plug(&plug);
return err;
}
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct blk_plug plug;
int ret;
/* deal with chardevs and other special file */
trace_f2fs_writepages(mapping->host, wbc, DATA);
+ blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc);
+ blk_finish_plug(&plug);
/*
* if some pages were truncated, we cannot guarantee its mapping->host
* to detect pending bios.
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct blk_plug plug;
ssize_t ret;
if (f2fs_encrypted_inode(inode) &&
ret = generic_write_checks(iocb, from);
if (ret > 0) {
ret = f2fs_preallocate_blocks(iocb, from);
- if (!ret)
+ if (!ret) {
+ blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
+ }
}
inode_unlock(inode);
{
struct page *sum_page;
struct f2fs_summary_block *sum;
+ struct blk_plug plug;
unsigned int segno = start_segno;
unsigned int end_segno = start_segno + sbi->segs_per_sec;
int seg_freed = 0;
unlock_page(sum_page);
}
+ blk_start_plug(&plug);
+
for (segno = start_segno; segno < end_segno; segno++) {
if (get_valid_blocks(sbi, segno, 1) == 0)
f2fs_submit_merged_bio(sbi,
(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
+ blk_finish_plug(&plug);
+
if (gc_type == FG_GC) {
while (start_segno < end_segno)
if (get_valid_blocks(sbi, start_segno++, 1) == 0)
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff;
/* balancing f2fs's metadata in background */
diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE;
+ blk_start_plug(&plug);
sync_node_pages(sbi, wbc);
+ blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0;
excess_prefree_segs(sbi) ||
excess_dirty_nats(sbi) ||
(is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
- if (test_opt(sbi, DATA_FLUSH))
+ if (test_opt(sbi, DATA_FLUSH)) {
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
sync_dirty_inodes(sbi, FILE_INODE);
+ blk_finish_plug(&plug);
+ }
f2fs_sync_fs(sbi->sb, true);
stat_inc_bg_cp_count(sbi->stat_info);
}
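
The hunks above all apply the same block-plugging pattern: declare a struct blk_plug on the stack, call blk_start_plug() before a batch of writeback submissions (sync_meta_pages(), f2fs_write_cache_pages(), __generic_file_write_iter(), the GC segment loop, sync_node_pages(), sync_dirty_inodes()), and call blk_finish_plug() once the batch has been queued. What follows is a minimal, self-contained sketch of that pattern only, not code from this patch; submit_one_page() and write_pages_plugged() are hypothetical stand-ins for the real per-page submission paths being wrapped.

#include <linux/blkdev.h>	/* struct blk_plug, blk_start_plug(), blk_finish_plug() */

/* Hypothetical per-page submission helper, standing in for the real
 * writeback calls that the plugs in this patch wrap. */
void submit_one_page(struct page *page);

/* Sketch: queue a batch of requests on the per-task plug list, then
 * flush them so the block layer can merge them before dispatch. */
static void write_pages_plugged(struct page **pages, int nr_pages)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* start collecting requests */

	for (i = 0; i < nr_pages; i++)
		submit_one_page(pages[i]);

	blk_finish_plug(&plug);		/* submit the collected batch */
}

The point of plugging these paths is that each of them issues many small bios back to back; holding them on the per-task plug list gives the block layer a chance to merge them into larger requests instead of dispatching each bio as it is submitted.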