/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
- struct discard_cmd *dc)
+ struct discard_cmd *dc, bool fstrim)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
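+	/* fstrim-issued commands are tracked on a dedicated wait list */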
+ struct list_head *wait_list = fstrim ? &(dcc->fstrim_list) :
+ &(dcc->wait_list);
struct bio *bio = NULL;
	if (dc->state != D_PREP)
		return;

	/* bio is set up by __blkdev_issue_discard() (elided here) */
	bio->bi_private = dc;
	bio->bi_end_io = f2fs_submit_discard_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
-	list_move_tail(&dc->list, &dcc->wait_list);
+	list_move_tail(&dc->list, wait_list);
	__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
	f2fs_update_iostat(sbi, FS_DISCARD, 1);
}
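+/*
+ * Issue preallocated (D_PREP) discard commands within [start, end] that are
+ * at least @granularity blocks long; commands already being issued are moved
+ * onto the fstrim wait list so the caller can wait for them as well.
+ */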
+static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+ unsigned int start, unsigned int end,
+ unsigned int granularity)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct discard_cmd *dc;
+ struct blk_plug plug;
+ int issued;
+
+next:
+ issued = 0;
+
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+
+ dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+ NULL, start,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (!dc)
+ dc = next_dc;
+
+ blk_start_plug(&plug);
+
+ while (dc && dc->lstart <= end) {
+ struct rb_node *node;
+
+ if (dc->len < granularity)
+ goto skip;
+
+ if (dc->state != D_PREP) {
+ list_move_tail(&dc->list, &dcc->fstrim_list);
+ goto skip;
+ }
+
+ __submit_discard_cmd(sbi, dc, true);
+
+ if (++issued >= DISCARD_ISSUE_RATE) {
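+			/* issued a full batch: record the resume point, drop the lock and reschedule */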
+ start = dc->lstart + dc->len;
+
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+
+ schedule();
+
+ goto next;
+ }
+skip:
+ node = rb_next(&dc->rb_node);
+ dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+
+ if (fatal_signal_pending(current))
+ break;
+ }
+
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+}
+
static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
/* Hurry up to finish fstrim */
if (dcc->pend_list_tag[i] & P_TRIM) {
- __submit_discard_cmd(sbi, dc);
+ __submit_discard_cmd(sbi, dc, false);
issued++;
-
- if (fatal_signal_pending(current))
- break;
continue;
}
if (!issue_cond) {
- __submit_discard_cmd(sbi, dc);
+ __submit_discard_cmd(sbi, dc, false);
issued++;
continue;
}
if (is_idle(sbi)) {
- __submit_discard_cmd(sbi, dc);
+ __submit_discard_cmd(sbi, dc, false);
issued++;
} else {
io_interrupted = true;
mutex_unlock(&dcc->cmd_lock);
}
-static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+static void __wait_discard_cmd_range(struct f2fs_sb_info *sbi, bool wait_cond,
+ block_t start, block_t end,
+ unsigned int granularity,
+ bool fstrim)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct list_head *wait_list = &(dcc->wait_list);
+ struct list_head *wait_list = fstrim ? &(dcc->fstrim_list) :
+ &(dcc->wait_list);
struct discard_cmd *dc, *tmp;
bool need_wait;
mutex_lock(&dcc->cmd_lock);
list_for_each_entry_safe(dc, tmp, wait_list, list) {
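+		/* skip commands outside [start, end) or shorter than @granularity */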
+ if (dc->lstart + dc->len <= start || end <= dc->lstart)
+ continue;
+ if (dc->len < granularity)
+ continue;
if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
wait_for_completion_io(&dc->wait);
__remove_discard_cmd(sbi, dc);
}
}
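+/* wait for every command on the regular wait list, regardless of range or length */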
+static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+{
+ __wait_discard_cmd_range(sbi, wait_cond, 0, UINT_MAX, 1, false);
+}
+
/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
}
}
-/* This comes from f2fs_put_super and f2fs_trim_fs */
-void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
+/* This comes from f2fs_put_super */
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
__issue_discard_cmd(sbi, false);
__drop_discard_cmd(sbi);
- __wait_discard_cmd(sbi, !umount);
+ __wait_all_discard_cmd(sbi, false);
}
static void mark_discard_range_all(struct f2fs_sb_info *sbi)
issued = __issue_discard_cmd(sbi, true);
if (issued) {
- __wait_discard_cmd(sbi, true);
+ __wait_all_discard_cmd(sbi, true);
wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
} else {
wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
dcc->pend_list_tag[i] |= P_ACTIVE;
}
INIT_LIST_HEAD(&dcc->wait_list);
+ INIT_LIST_HEAD(&dcc->fstrim_list);
mutex_init(&dcc->cmd_lock);
atomic_set(&dcc->issued_discard, 0);
atomic_set(&dcc->issing_discard, 0);
{
__u64 start = F2FS_BYTES_TO_BLK(range->start);
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
- unsigned int start_segno, end_segno;
+ unsigned int start_segno, end_segno, cur_segno;
+ block_t start_block, end_block;
struct cp_control cpc;
int err = 0;
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
/* do checkpoint to issue discard commands safely */
- for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
- cpc.trim_start = start_segno;
+ for (cur_segno = start_segno; cur_segno <= end_segno;
+ cur_segno = cpc.trim_end + 1) {
+ cpc.trim_start = cur_segno;
if (sbi->discard_blks == 0)
break;
		else if (sbi->segs_per_sec != 1)
			cpc.trim_end = end_segno;
else
cpc.trim_end = min_t(unsigned int,
- rounddown(start_segno +
+ rounddown(cur_segno +
BATCHED_TRIM_SEGMENTS(sbi),
sbi->segs_per_sec) - 1, end_segno);
schedule();
}
- /* It's time to issue all the filed discards */
- mark_discard_range_all(sbi);
- f2fs_wait_discard_bios(sbi, false);
+
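+	/* the checkpoint loop may stop early, so clamp the range to what was actually trimmed */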
+ start_block = START_BLOCK(sbi, start_segno);
+ end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1);
+
+ __issue_discard_cmd_range(sbi, start_block, end_block, cpc.trim_minlen);
+ __wait_discard_cmd_range(sbi, true, start_block, end_block,
+ cpc.trim_minlen, true);
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return err;