void destroy_flush_cmd_control(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
-void clear_prefree_segments(struct f2fs_sb_info *);
+void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
void discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool);
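The prototype change above means every caller of clear_prefree_segments() must now hand in a cp_control. Below is a minimal sketch of what an updated call site looks like, assuming the checkpoint path already threads a struct cp_control through (as the f2fs_trim_fs() hunks further down suggest); the function name is hypothetical, for illustration only:

	/* Hypothetical call site -- not part of this patch. The checkpoint
	 * code forwards its cp_control so that clear_prefree_segments()
	 * can inspect cpc->reason and cpc->trim_minlen. */
	static void example_checkpoint_tail(struct f2fs_sb_info *sbi,
						struct cp_control *cpc)
	{
		/* ... checkpoint work elided ... */
		clear_prefree_segments(sbi, cpc);	/* was: clear_prefree_segments(sbi) */
	}

The next hunk is from add_discard_addrs() in segment.c.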
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
-
-		if (force && end - start < cpc->trim_minlen)
-			continue;
-
		__add_discard_entry(sbi, cpc, se, start, end);
	}
}
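This hunk removes the early minlen filter from add_discard_addrs(): every candidate extent is now queued through __add_discard_entry(), and the trim_minlen policy is applied once, at issue time, in clear_prefree_segments(). That ordering matters if __add_discard_entry() keeps this era's behavior of extending the last queued entry when the new range is physically contiguous with it: an extent that looks too short here can still grow past trim_minlen before it is issued. A simplified sketch of that coalescing idea (try_merge_discard() and its parameters are illustrative stand-ins, not f2fs code):

	/* Sketch only: if the new range continues the last queued entry,
	 * grow that entry in place instead of allocating a new one. */
	static bool try_merge_discard(struct list_head *head, block_t blkaddr,
					unsigned int start, unsigned int end)
	{
		struct discard_entry *last;

		if (list_empty(head))
			return false;

		last = list_last_entry(head, struct discard_entry, list);
		if (blkaddr != last->blkaddr + last->len)
			return false;

		/* contiguous with the previous extent: entry->len keeps
		 * growing across calls, which is how a short range can
		 * still reach trim_minlen by issue time */
		last->len += end - start;
		return true;
	}

The next hunk's leading context (the mutex_unlock() and closing brace below) is the tail of the function that precedes clear_prefree_segments() in segment.c.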
	mutex_unlock(&dirty_i->seglist_lock);
}
-void clear_prefree_segments(struct f2fs_sb_info *sbi)
+void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
+		if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
+skip:
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
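Note the shape of the skip path: only f2fs_issue_discard() is bypassed. The entry is still unlinked, nr_discards is still decremented, and the slab object is still freed, so undersized extents are dropped cleanly rather than left queued for a later pass. The new condition can be read as a small policy function (should_issue_discard() is a hypothetical restatement, not part of the patch):

	static inline bool should_issue_discard(struct cp_control *cpc,
						struct discard_entry *entry)
	{
		/* FITRIM (CP_DISCARD) honours the user's minimum extent
		 * length; a regular checkpoint issues everything queued */
		return cpc->reason != CP_DISCARD || entry->len >= cpc->trim_minlen;
	}

The remaining hunks are from f2fs_trim_fs(), the FITRIM handler that builds the cp_control consumed above.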
	unsigned int start_segno, end_segno;
	struct cp_control cpc;

-	if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) ||
-			range->len < sbi->blocksize)
+	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;
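The validation change is a relaxation rather than a bug fix: a minlen larger than one segment is no longer rejected up front. Since the minlen policy now lives in clear_prefree_segments(), an oversized minlen simply means small extents are filtered at issue time and the ioctl succeeds, possibly trimming little or nothing. Summarized:

	/* Before: range->minlen > one segment -> -EINVAL from f2fs_trim_fs()
	 * After:  range->minlen > one segment -> accepted; extents shorter
	 *         than cpc.trim_minlen are skipped at issue time instead */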
	cpc.trimmed = 0;
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
-	cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen);
+	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
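The max_t() clamp covers the bytes-to-blocks truncation: F2FS_BYTES_TO_BLK() rounds down, so any minlen smaller than one block becomes 0. A trim_minlen of 0 happens to behave the same in the entry->len < cpc->trim_minlen test (entry->len is always at least 1), so the clamp is mostly defensive, keeping trim_minlen meaningful as a block count. Rough arithmetic, assuming the usual 4 KiB f2fs block:

	/* F2FS_BYTES_TO_BLK(bytes) == bytes >> 12 with a 4 KiB block:
	 *   range->minlen = 0     -> 0 blocks   -> clamped to 1
	 *   range->minlen = 512   -> 0 blocks   -> clamped to 1
	 *   range->minlen = 1 MiB -> 256 blocks -> kept as-is */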
	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
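For context, everything above is driven from userspace through the generic FITRIM ioctl (fstrim(8) is the usual front end); range->minlen and range->len come straight from the caller. A minimal, self-contained FITRIM caller using only the standard Linux UAPI (the mount point and minlen value are arbitrary examples):

	#include <fcntl.h>
	#include <limits.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	int main(void)
	{
		struct fstrim_range range;
		int fd = open("/mnt", O_RDONLY);	/* any fd on the filesystem */

		if (fd < 0)
			return 1;

		memset(&range, 0, sizeof(range));
		range.len = ULLONG_MAX;		/* trim the whole filesystem */
		range.minlen = 1024 * 1024;	/* becomes range->minlen above */

		if (ioctl(fd, FITRIM, &range) < 0) {
			perror("FITRIM");
			close(fd);
			return 1;
		}

		/* the kernel writes back the number of bytes trimmed; in
		 * f2fs this comes from the cpc.trimmed counter initialized
		 * above and accumulated across the per-batch checkpoints */
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
		close(fd);
		return 0;
	}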