f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
if (drop_atomic)
- drop_inmem_pages_all(sbi);
+ drop_inmem_pages_all(sbi, false);
return err;
}
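drop_inmem_pages_all() now takes a second argument that distinguishes its callers: the write-failure path above passes false to revoke every atomic file unconditionally, while the GC path further down passes true so that only files whose atomic data has already stalled GC get dropped. A minimal userspace sketch of that per-inode decision, with an illustrative struct standing in for f2fs_inode_info:

#include <stdbool.h>

/* Illustrative stand-in for struct f2fs_inode_info; only the counter
 * this patch consults is modeled here. */
struct inode_model {
	unsigned int gc_failures_atomic; /* i_gc_failures[GC_FAILURE_ATOMIC] */
};

/* gc_failure == false: drop every atomic file (error path above);
 * gc_failure == true:  drop only files that have already stalled GC. */
static bool should_drop(const struct inode_model *fi, bool gc_failure)
{
	return !gc_failure || fi->gc_failures_atomic != 0;
}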
si->avail_nids = NM_I(sbi)->available_nids;
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->bg_gc = sbi->bg_gc;
+ si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
+ si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
/ 2;
si->bg_data_blks);
seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
si->bg_node_blks);
+ seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
+ si->skipped_atomic_files[BG_GC] +
+ si->skipped_atomic_files[FG_GC],
+ si->skipped_atomic_files[BG_GC]);
seq_puts(s, "\nExtent Cache:\n");
seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
si->hit_largest, si->hit_cached,
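The new debugfs line reports the grand total of skipped atomic files with the background share in parentheses, so the foreground share is the difference. A worked example of the format string:

#include <stdio.h>

int main(void)
{
	/* example values only: 3 skips during BG_GC, 5 during FG_GC */
	unsigned long long bg = 3, fg = 5;

	/* same layout as stat_show(): total first, BG share in parens */
	printf("Skipped : atomic write %llu (%llu)\n", bg + fg, bg);
	return 0; /* prints "Skipped : atomic write 8 (3)" */
}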
#define DEF_DIR_LEVEL 0
+enum {
+ GC_FAILURE_PIN,
+ GC_FAILURE_ATOMIC,
+ MAX_GC_FAILURE
+};
+
struct f2fs_inode_info {
struct inode vfs_inode; /* serve a vfs inode */
unsigned long i_flags; /* keep an inode flags for ioctl */
unsigned char i_advise; /* use to give file attribute hints */
unsigned char i_dir_level; /* use for dentry level for large dir */
- union {
- unsigned int i_current_depth; /* only for directory depth */
- unsigned short i_gc_failures; /* only for regular file */
- };
+ unsigned int i_current_depth; /* only for directory depth */
+ /* for gc failure statistics */
+ unsigned int i_gc_failures[MAX_GC_FAILURE];
unsigned int i_pino; /* parent inode number */
umode_t i_acl_mode; /* keep file acl mode temporarily */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
+ /* for skip statistics */
+ unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
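The union had to go because a regular file can now carry both failure counters at once: pin failures and atomic-write failures are tracked per cause in the inode, while skip totals are tracked per GC type in the superblock. A compilable sketch of the two counter families and how one skipped foreground pass feeds both (field names mirror the kernel, the surrounding types are simplified):

enum { GC_FAILURE_PIN, GC_FAILURE_ATOMIC, MAX_GC_FAILURE };
enum { BG_GC, FG_GC };	/* assumed encoding; defined elsewhere in f2fs */

struct inode_counters {
	unsigned int i_gc_failures[MAX_GC_FAILURE];	/* per file, per cause */
};

struct sb_counters {
	unsigned long long skipped_atomic_files[2];	/* per fs, per GC type */
};

/* One skipped atomic file during a foreground pass bumps both levels,
 * mirroring the move_data_block()/move_data_page() hunks below. */
static void account_skip(struct inode_counters *fi, struct sb_counters *sbi)
{
	fi->i_gc_failures[GC_FAILURE_ATOMIC]++;
	sbi->skipped_atomic_files[FG_GC]++;
}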
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
FI_PIN_FILE, /* indicate file should not be gced */
+ FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
static inline void f2fs_i_gc_failures_write(struct inode *inode,
unsigned int count)
{
- F2FS_I(inode)->i_gc_failures = count;
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
f2fs_mark_inode_dirty_sync(inode, true);
}
*/
bool need_SSR(struct f2fs_sb_info *sbi);
void register_inmem_page(struct inode *inode, struct page *page);
-void drop_inmem_pages_all(struct f2fs_sb_info *sbi);
+void drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void drop_inmem_pages(struct inode *inode);
void drop_inmem_page(struct inode *inode, struct page *page);
int commit_inmem_pages(struct inode *inode);
int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
+ unsigned long long skipped_atomic_files[2];
int curseg[NR_CURSEG_TYPE];
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
goto out;
skip_flush:
set_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
if (!ret) {
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
stat_dec_atomic_write(inode);
}
} else {
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
err_out:
+ if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+ ret = -EINVAL;
+ }
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
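The err_out check closes a race: if GC revoked this file's in-memory pages while the commit ioctl was in flight, FI_ATOMIC_REVOKE_REQUEST is still set and the commit must report failure so userspace can replay the whole transaction. A hedged userspace model of that epilogue (names are illustrative, not kernel API):

#include <errno.h>
#include <stdbool.h>

struct inode_model {
	bool revoke_requested;	/* FI_ATOMIC_REVOKE_REQUEST */
};

/* Mirrors the err_out: path above: a pending revoke overrides an
 * otherwise successful sync and surfaces as -EINVAL. */
static int commit_epilogue(struct inode_model *inode, int ret)
{
	if (inode->revoke_requested) {
		inode->revoke_requested = false;
		ret = -EINVAL;	/* atomic data was dropped by GC */
	}
	return ret;
}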
/* Use i_gc_failures for normal file as a risk signal. */
if (inc)
- f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
+ f2fs_i_gc_failures_write(inode,
+ fi->i_gc_failures[GC_FAILURE_PIN] + 1);
- if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
+ if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
f2fs_msg(sbi->sb, KERN_WARNING,
"%s: Enable GC = ino %lx after %x GC trials\n",
- __func__, inode->i_ino, fi->i_gc_failures);
+ __func__, inode->i_ino,
+ fi->i_gc_failures[GC_FAILURE_PIN]);
clear_inode_flag(inode, FI_PIN_FILE);
return -EAGAIN;
}
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
- F2FS_I(inode)->i_gc_failures = 1;
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
goto done;
}
goto out;
set_inode_flag(inode, FI_PIN_FILE);
- ret = F2FS_I(inode)->i_gc_failures;
+ ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
__u32 pin = 0;
if (is_inode_flag_set(inode, FI_PIN_FILE))
- pin = F2FS_I(inode)->i_gc_failures;
+ pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
return put_user(pin, (u32 __user *)arg);
}
* This can be used to move blocks, aka LBAs, directly on disk.
*/
static void move_data_block(struct inode *inode, block_t bidx,
- unsigned int segno, int off)
+ int gc_type, unsigned int segno, int off)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
if (!check_valid_map(F2FS_I_SB(inode), segno, off))
goto out;
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
+ F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
goto out;
+ }
if (f2fs_is_pinned_file(inode)) {
f2fs_pin_file_control(inode, true);
if (!check_valid_map(F2FS_I_SB(inode), segno, off))
goto out;
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
+ F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
goto out;
+ }
if (f2fs_is_pinned_file(inode)) {
if (gc_type == FG_GC)
f2fs_pin_file_control(inode, true);
start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_post_read_required(inode))
- move_data_block(inode, start_bidx, segno, off);
+ move_data_block(inode, start_bidx, gc_type,
+ segno, off);
else
move_data_page(inode, start_bidx, gc_type,
segno, off);
.ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
+ unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+ unsigned int skipped_round = 0, round = 0;
trace_f2fs_gc_begin(sbi->sb, sync, background,
get_pages(sbi, F2FS_DIRTY_NODES),
sec_freed++;
total_freed += seg_freed;
+ if (gc_type == FG_GC) {
+ if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+ skipped_round++;
+ last_skipped = sbi->skipped_atomic_files[FG_GC];
+ round++;
+ }
+
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
+ skipped_round * 2 >= round)
+ drop_inmem_pages_all(sbi, true);
segno = NULL_SEGNO;
goto gc_more;
}
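The bookkeeping above turns "GC keeps losing to atomic writers" into a measurable condition: a round counts as skipped when the global FG_GC skip counter moved during that pass, and revocation fires only once more than MAX_SKIP_ATOMIC_COUNT rounds were skipped and skips cover at least half of all foreground rounds. A self-contained sketch of that hysteresis under those definitions:

#include <stdbool.h>

#define MAX_SKIP_ATOMIC_COUNT	16	/* from the segment.h hunk at the end */

struct gc_progress {
	unsigned long long last_skipped;	/* snapshot of skipped_atomic_files[FG_GC] */
	unsigned int skipped_round;
	unsigned int round;
};

/* Called once per foreground pass with the current global skip count. */
static void account_round(struct gc_progress *p, unsigned long long skipped_now)
{
	if (skipped_now > p->last_skipped)
		p->skipped_round++;
	p->last_skipped = skipped_now;
	p->round++;
}

/* Revoke only on strong evidence: more than 16 skipped rounds, and
 * skipped rounds make up at least half of all rounds so far. */
static bool should_revoke_atomic_writes(const struct gc_progress *p)
{
	return p->skipped_round > MAX_SKIP_ATOMIC_COUNT &&
	       p->skipped_round * 2 >= p->round;
}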
if (S_ISDIR(inode->i_mode))
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
else if (S_ISREG(inode->i_mode))
- fi->i_gc_failures = le16_to_cpu(ri->i_gc_failures);
+ fi->i_gc_failures[GC_FAILURE_PIN] =
+ le16_to_cpu(ri->i_gc_failures);
fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
fi->i_flags = le32_to_cpu(ri->i_flags);
fi->flags = 0;
ri->i_current_depth =
cpu_to_le32(F2FS_I(inode)->i_current_depth);
else if (S_ISREG(inode->i_mode))
- ri->i_gc_failures = cpu_to_le16(F2FS_I(inode)->i_gc_failures);
+ ri->i_gc_failures =
+ cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
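Note the asymmetry in persistence: the on-disk i_gc_failures field stays 16 bits wide, so only the pin-failure counter is stored, truncated to its low 16 bits, while the atomic-failure counter is purely in-core and is cleared on every commit or drop. A small model of that round-trip:

#include <stdint.h>

/* Analogue of the cpu_to_le16()/le16_to_cpu() pair above (endianness
 * omitted): widening in core, narrowing on disk. */
static uint16_t pin_failures_to_disk(unsigned int in_core)
{
	return (uint16_t)in_core;	/* values above 65535 are truncated */
}

static unsigned int pin_failures_from_disk(uint16_t on_disk)
{
	return on_disk;
}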
return err;
}
-void drop_inmem_pages_all(struct f2fs_sb_info *sbi)
+void drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
{
struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
struct inode *inode;
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
if (inode) {
+ if (gc_failure) {
+ if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
+ goto skip;
+ }
+ set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
drop_inmem_pages(inode);
+skip:
iput(inode);
}
congestion_wait(BLK_RW_ASYNC, HZ/50);
cond_resched();
goto next;
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
stat_dec_atomic_write(inode);
}
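Taken together, the atomic-failure counter has a strict lifecycle: it grows only while GC is being skipped, and it is reset the moment the transaction resolves, either by a successful commit (file.c hunk above) or by a drop here. A tiny state-machine sketch of that lifecycle, with illustrative names:

#include <stdbool.h>

struct atomic_state {
	bool is_atomic_file;		/* FI_ATOMIC_FILE */
	unsigned int gc_failures_atomic;
};

static void on_gc_skip(struct atomic_state *s)
{
	s->gc_failures_atomic++;	/* GC lost this pass to the writer */
}

static void on_commit_or_drop(struct atomic_state *s)
{
	s->is_atomic_file = false;
	s->gc_failures_atomic = 0;	/* never outlives the transaction */
}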
#define IS_DUMMY_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
+#define MAX_SKIP_ATOMIC_COUNT 16
+
struct inmem_pages {
struct list_head list;
struct page *page;