The periodic checkpoint can resolve the previous issue, so we can now skip dentry block writes again to address the reported performance regression:
https://lkml.org/lkml/2015/10/8/20
This reverts commit 15bec0ff5a9ba6d203178fa8772259df6207942a.
 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
 		return 0;
 
+	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
+			available_free_memory(sbi, DIRTY_DENTS))
+		goto skip_write;
+
 	/* during POR, we don't need to trigger writepage at all. */
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto skip_write;
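
This first hunk restores the skip test in the data writepages path (f2fs_write_data_pages()): under WB_SYNC_NONE, a directory inode whose dirty page count is still below nr_pages_to_skip(sbi, DATA) is skipped outright, as long as dirty dentry pages are not yet consuming too much memory, since the periodic checkpoint will flush them later anyway. A minimal standalone sketch of that decision follows; the struct, helper name, and sample numbers are illustrative assumptions, not f2fs definitions:

/* Standalone model of the skip test above; all names and numbers
 * here are assumptions for illustration, not f2fs code. */
#include <stdbool.h>
#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

struct wb_ctx {
	enum sync_mode sync_mode;	/* writeback urgency */
	bool is_dir;			/* S_ISDIR(inode->i_mode) */
	long dirty_pages;		/* get_dirty_pages(inode) */
	long skip_threshold;		/* nr_pages_to_skip(sbi, DATA) */
	bool dents_mem_ok;		/* available_free_memory(sbi, DIRTY_DENTS) */
};

/* True when background writeback may skip this directory inode and
 * leave its dentry pages for the periodic checkpoint to flush. */
static bool may_skip_dir_writeback(const struct wb_ctx *c)
{
	return c->is_dir &&
	       c->sync_mode == WB_SYNC_NONE &&
	       c->dirty_pages < c->skip_threshold &&
	       c->dents_mem_ok;
}

int main(void)
{
	struct wb_ctx c = {
		.sync_mode = WB_SYNC_NONE,
		.is_dir = true,
		.dirty_pages = 64,
		.skip_threshold = 512,	/* e.g. one segment of blocks */
		.dents_mem_ok = true,
	};
	printf("skip? %s\n", may_skip_dir_writeback(&c) ? "yes" : "no");
	return 0;
}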
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
 							PAGE_CACHE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+	} else if (type == DIRTY_DENTS) {
+		if (sbi->sb->s_bdi->wb.dirty_exceeded)
+			return false;
+		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
+		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else if (type == INO_ENTRIES) {
 		int i;
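
The new DIRTY_DENTS branch of available_free_memory() counts dirty dentry pages via get_pages(sbi, F2FS_DIRTY_DENTS) and allows them up to half of the ram_thresh% slice of available RAM (the ">> 1"), a larger share than the quarter (">> 2") granted to cached NAT entries above. A rough standalone model of that arithmetic, with assumed figures (in f2fs, avail_ram and ram_thresh come from the nm_i state):

#include <stdbool.h>
#include <stdio.h>

/* True while dirty dentry pages consume less than half of the
 * ram_thresh% slice of available RAM (mirrors the ">> 1" above). */
static bool dents_below_threshold(unsigned long dirty_dent_pages,
				  unsigned long avail_ram_pages,
				  unsigned int ram_thresh)
{
	return dirty_dent_pages <
	       ((avail_ram_pages * ram_thresh / 100) >> 1);
}

int main(void)
{
	/* Assumed: 1 GiB of reclaimable RAM in 4 KiB pages, 10% threshold. */
	unsigned long avail = (1UL << 30) / 4096;

	printf("limit = %lu pages\n", (avail * 10 / 100) >> 1);
	printf("8000 dirty dentry pages ok? %d\n",
	       dents_below_threshold(8000, avail, 10));
	return 0;
}

With these assumed figures the limit works out to 13107 pages, so 8000 dirty dentry pages still pass and writeback keeps being skipped; once the pool crosses the limit, or the bdi reports dirty_exceeded, skipping stops and the pages are written out.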
 enum mem_type {
 	FREE_NIDS,	/* indicates the free nid list */
 	NAT_ENTRIES,	/* indicates the cached nat entry */
+	DIRTY_DENTS,	/* indicates dirty dentry pages */
 	INO_ENTRIES,	/* indicates inode entries */
 	EXTENT_CACHE,	/* indicates extent cache */
 	BASE_CHECK,	/* check kernel status */
 	if (sbi->sb->s_bdi->wb.dirty_exceeded)
 		return 0;
 
-	if (type == NODE)
+	if (type == DATA)
+		return sbi->blocks_per_seg;
+	else if (type == NODE)
 		return 3 * sbi->blocks_per_seg;
 	else if (type == META)
 		return MAX_BIO_BLOCKS(sbi);
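
Finally, nr_pages_to_skip() gains a DATA case, so data writeback is considered skippable while fewer than one segment's worth of blocks is dirty (NODE keeps its three-segment threshold and META its bio-sized one). A hedged sketch of the function after this change; the constants are assumed stand-ins for sbi->blocks_per_seg and MAX_BIO_BLOCKS(sbi):

#include <stdio.h>

enum page_type { DATA, NODE, META };

#define BLOCKS_PER_SEG	512	/* assumed stand-in for sbi->blocks_per_seg */
#define MAX_BIO_BLKS	256	/* assumed stand-in for MAX_BIO_BLOCKS(sbi) */

static long nr_pages_to_skip(enum page_type type)
{
	if (type == DATA)
		return BLOCKS_PER_SEG;		/* new: one segment of data */
	else if (type == NODE)
		return 3 * BLOCKS_PER_SEG;	/* unchanged */
	else if (type == META)
		return MAX_BIO_BLKS;		/* unchanged */
	return 0;			/* assumed default for other types */
}

int main(void)
{
	printf("DATA skip threshold: %ld blocks\n", nr_pages_to_skip(DATA));
	printf("NODE skip threshold: %ld blocks\n", nr_pages_to_skip(NODE));
	return 0;
}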