f2fs: catch up to v4.14-rc1
fs/f2fs/gc.c (exynos8895/android_kernel_samsung_universal8895)
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

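/*
 * Background GC runs in its own kthread and adapts its sleep interval to
 * filesystem activity. The sleep-time bounds and the idle/urgent policies
 * used below are runtime-tunable; on mainline-style builds these are assumed
 * to be exposed through sysfs knobs such as
 * /sys/fs/f2fs/<dev>/gc_min_sleep_time (names may differ on this tree).
 */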
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* woken up explicitly: give GC a try right away */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering conditions]
		 * 0. GC is not currently running.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we must avoid triggering GC too frequently, because
		 * segments may be invalidated soon afterwards by user updates
		 * or deletions, so we wait a while to let dirty segments
		 * accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (gc_th->gc_urgent) {
			wait_ms = gc_th->urgent_sleep_time;
			goto do_gc;
		}

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
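
/*
 * Sleep-time adaptation, illustrated with the stock defaults (assumed to be
 * the usual DEF_GC_THREAD_* values, i.e. min 30 s, max 60 s, no-GC 300 s and
 * urgent 500 ms; check gc.h on this tree):
 *   - IO subsystem busy    -> back off toward the 60 s maximum
 *   - many invalid blocks  -> speed up toward the 30 s minimum
 *   - no victim selected   -> sleep the long 300 s interval
 *   - gc_urgent set        -> poll every 500 ms regardless of idleness
 */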

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;
	gc_th->gc_urgent = 0;
	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

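/*
 * Map the gc_idle tunable onto a victim selection mode: by default BG_GC
 * uses cost-benefit and FG_GC uses greedy; gc_idle == 1 forces cost-benefit
 * and gc_idle == 2 forces greedy.
 */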
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* start from the beginning of the hot/small space first */
	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that
	 * background GC selected earlier; those sections are guaranteed
	 * to contain only a few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

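/*
 * Cost-benefit victim scoring: with utilization u (percent of valid blocks)
 * and an age normalized to [0, 100], the benefit is age * (100 - u) /
 * (100 + u), and the returned "cost" is UINT_MAX minus that, so smaller is
 * better. For example, u = 20 and age = 50 give 100 * 80 * 50 / 120 = 3333,
 * i.e. a cost of UINT_MAX - 3333.
 */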
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	unsigned int valid_blocks =
			get_valid_blocks(sbi, segno, true);

	return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
				valid_blocks * 2 : valid_blocks;
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_greedy_cost(sbi, segno);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called for GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When it is called for SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

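	/*
	 * Scan the dirty bitmap starting at p.offset; when the end of the
	 * main area is reached, wrap around and rescan from segment 0 up to
	 * the previous last_victim so the whole bitmap is eventually covered.
	 */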
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match (the node is valid), the node is copied
 * with cold status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index that the given node offset covers.
 * Be careful: the caller must pass only node offsets of direct node blocks;
 * passing an offset that points to another node block type, such as an
 * indirect or double indirect node block, is a caller's bug.
 */
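/*
 * Illustrative examples (assuming the usual inode -> direct-node ->
 * indirect-node layout described in node.h):
 *   node_ofs == 1 (1st direct node)  -> bidx 0 -> start at ADDRS_PER_INODE
 *   node_ofs == 4 (1st direct node under the 1st indirect node)
 *                -> bidx 2 -> ADDRS_PER_INODE + 2 * ADDRS_PER_BLOCK
 */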
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

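/*
 * Check whether the data block at @blkaddr is still referenced by the node
 * recorded in the summary entry: read that node page, compare versions, and
 * return true only if the node still maps this block address. @nofs returns
 * the node offset for start_bidx_of_node().
 */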
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, a.k.a. LBAs, directly on disk.
 */
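/*
 * The flow, as implemented below: grab the cached data page without reading
 * it, allocate a new block address, read the old block into a META_MAPPING
 * page keyed by the new address, then dirty that page and write it out at
 * the new address before updating the dnode. On failure the allocation is
 * rolled back via __f2fs_replace_block().
 */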
static void move_data_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = REQ_SYNC,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* grab the page from the cache without reading it in */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* the new block was allocated above; wait for the node page first */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC | REQ_NOIDLE;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

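/*
 * Move a plaintext data block through the regular page cache: for BG_GC the
 * page is only dirtied and marked cold so the writeback path migrates it
 * later; for FG_GC it is written out synchronously, retrying on transient
 * -ENOMEM.
 */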
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the recorded data block address
 * differs, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* get the inode by ino, checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to phase 4 */
			if (f2fs_encrypted_file(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all in-flight AIO data */
				inode_dio_wait(inode);
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_file(inode))
				move_data_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

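/*
 * Garbage-collect one section (sbi->segs_per_sec segments) starting at
 * @start_segno: pin all of its summary pages first, then migrate node or
 * data blocks segment by segment. Returns the number of segments whose
 * valid-block count dropped to zero (counted for FG_GC only), which the
 * caller compares against segs_per_sec.
 */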
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

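/*
 * Entry point for both background and foreground GC. Called with
 * sbi->gc_mutex held; the mutex is released before returning. When @sync is
 * true, GC runs in FG_GC mode and the return value is -EAGAIN if no section
 * could be freed.
 */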
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below a
		 * given threshold, we can free them by writing a checkpoint.
		 * Then we secure free segments and no longer need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

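/*
 * Compute the FG_GC candidacy threshold used by no_fggc_candidate(): a
 * section qualifies as an FG_GC victim only while its valid-block count
 * stays below fggc_threshold, which scales the section size by the ratio of
 * non-overprovisioned to non-reserved main-area blocks.
 */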
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));

	/* serve the warm/cold data area from the slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}