This patch registers shrinking of clean nat_cache entries with the f2fs shrinker, so they can be reclaimed under memory pressure.
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
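
For context (not part of this patch): f2fs_shrink_count() and f2fs_shrink_scan(), modified below, are the callbacks f2fs hooks into the kernel's generic shrinker. The hookup is roughly the following, as in f2fs's super.c, registered once at module init via register_shrinker(&f2fs_shrinker_info):

	static struct shrinker f2fs_shrinker_info = {
		.scan_objects = f2fs_shrink_scan,
		.count_objects = f2fs_shrink_count,
		.seeks = DEFAULT_SEEKS,
	};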
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
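+ /* remember the request so we can return the number of entries actually freed */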
+ int nr = nr_shrink;
- if (available_free_memory(sbi, NAT_ENTRIES))
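+ /* don't block: if the nat_tree_lock is contended, report nothing freed */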
+ if (!down_write_trylock(&nm_i->nat_tree_lock))
return 0;
- down_write(&nm_i->nat_tree_lock);
while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
struct nat_entry *ne;
ne = list_first_entry(&nm_i->nat_entries,
			struct nat_entry, list);
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
}
up_write(&nm_i->nat_tree_lock);
- return nr_shrink;
+ return nr - nr_shrink;
}
/* try to shrink extent cache when there is no enough memory */
f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
- /* check the # of cached NAT entries and prefree segments */
- if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+ /* check the # of cached NAT entries */
+ if (!available_free_memory(sbi, NAT_ENTRIES))
+ try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+ /* checkpoint is the only way to shrink partial cached entries */
+ if (!available_free_memory(sbi, NAT_ENTRIES) ||
excess_prefree_segs(sbi) ||
!available_free_memory(sbi, INO_ENTRIES))
f2fs_sync_fs(sbi->sb, true);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;
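+/* only clean nat entries are counted here; dirty ones can only be shrunk by checkpoint */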
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+}
+
unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
}
spin_unlock(&f2fs_list_lock);
- /* TODO: count # of objects */
+ /* count clean nat cache entries */
+ count += __count_nat_entries(sbi);
spin_lock(&f2fs_list_lock);
p = p->next;
sbi->shrinker_run_no = run_no;
- /* TODO: shrink caches */
+ /* shrink clean nat cache entries */
+ freed += try_to_free_nats(sbi, nr);
spin_lock(&f2fs_list_lock);
p = p->next;