/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_status) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}

		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
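
/*
 * Writeback accounting note: each data/node page entering a write bio is
 * counted with inc_page_count(sbi, WB_DATA_TYPE(page)) at submission time,
 * and f2fs_write_end_io() above is the single place that decrements it.
 * Dummy pages (IS_DUMMY_WRITTEN_PAGE) only pad bios and go straight back to
 * the write_io_dummy mempool; waiters on sbi->cp_wait are woken once the
 * last F2FS_WB_CP_DATA page completes, which is what checkpoint waits on.
 */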
/*
 * Return true if pre_bio's bdev is the same as its target device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);

	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
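
/*
 * Example of the padding math above, assuming F2FS_IO_SIZE(sbi) is 16 blocks
 * (mount option io_bits=4): a DATA/NODE write bio holding 35 pages yields
 * start = 35 % 16 = 3, so 13 zeroed dummy pages are appended to round the
 * bio up to a multiple of the configured IO size before submission.
 */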
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}
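
/*
 * The three calls above drain every merged write bio in the
 * sbi->write_io[btype][temp] matrix: one f2fs_bio_info per page type
 * (DATA/NODE/META) and temperature (HOT/WARM/COLD), each guarded by its own
 * io_rwsem. Paths such as checkpoint use this to push out all pending
 * merged bios before waiting for their completion.
 */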
/*
 * Fill the locked page with data located at the given block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, false);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}
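
/*
 * Merge policy used above: a page may join io->bio only if it is physically
 * contiguous with the last block in the bio, carries the same op/op_flags,
 * and targets the same block device; otherwise the accumulated bio is
 * submitted first. fio->in_list additionally lets callers queue several
 * f2fs_io_info entries on io->io_list and drain them under one io_rwsem
 * acquisition.
 */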
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_file(inode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	return bio;
}
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
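
/*
 * Callers use f2fs_update_data_blkaddr() when the new address must also be
 * reflected in the extent cache; set_data_blkaddr() alone only updates the
 * address slot in the node page.
 */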
/* dn->ofs_in_node will be returned with an up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
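
/*
 * Fast path above: an extent cache hit resolves index to a block address
 * without touching the dnode; only a miss pays for f2fs_reserve_block(),
 * which may allocate a dnode and reserve NEW_ADDR in it.
 */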
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, CURSEG_WARM_DATA, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return (f2fs_encrypted_file(inode) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int err = 0;

	/* convert inline data for Direct I/O */
	if (iocb->ki_flags & IOCB_DIRECT) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT)
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
			F2FS_GET_BLOCK_PRE_AIO :
			F2FS_GET_BLOCK_PRE_DIO);
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return err;
}
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}
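
/*
 * Locking asymmetry implemented above: F2FS_GET_BLOCK_PRE_AIO only needs to
 * exclude node-address changes, so it takes sbi->node_change as a reader;
 * all other mapping flags take the heavier f2fs_lock_op() (cp_rwsem) to
 * keep checkpoint out while blocks are being allocated.
 */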
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
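
/*
 * Usage sketch for f2fs_map_blocks(), assuming a caller that only wants to
 * translate a file region to device blocks without allocating anything:
 *
 *	struct f2fs_map_blocks map = { .m_lblk = index, .m_len = nr };
 *
 *	map.m_next_pgofs = NULL;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *
 * On success with F2FS_MAP_MAPPED set, blocks map.m_pblk through
 * map.m_pblk + map.m_len - 1 are physically contiguous. create=0 never
 * allocates; with create=1 the flag picks the strategy (PRE_AIO batches
 * reservations per dnode page, PRE_DIO allocates blocks immediately).
 */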
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
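
/*
 * The readpages loop above keeps one read bio open across consecutive pages
 * and only submits early when the next block is not physically adjacent
 * (last_block_in_bio != block_nr - 1) or sits on a different device, so a
 * fully contiguous readahead window becomes a single large bio.
 */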
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;
	if (is_cold_data(fio->page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
		return false;

	return need_inplace_update_policy(inode, fio);
}
static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (valid_ipu_blkaddr(fio)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* Avoid deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If the current allocation needs SSR,
	 * in-place writes are preferable for updated data.
	 */
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
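
/*
 * Two write policies are decided above: in-place update (IPU) rewrites
 * fio->old_blkaddr and is taken when a valid old address is known and
 * need_inplace_update() agrees (typically under SSR to limit fragmentation);
 * otherwise the LFS path (OPU) allocates a new block via write_data_page().
 * fio->need_lock encodes how far the caller already holds f2fs_lock_op().
 */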
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
	};

	trace_f2fs_writepage(page, DATA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page having a journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}

	down_write(&F2FS_I(inode)->i_sem);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	up_write(&F2FS_I(inode)->i_sem);

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}
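
/*
 * Like its mm/page-writeback.c ancestor, the function above makes one
 * cyclic pass: a range_cyclic walk that started mid-file and reached the
 * end without being stopped wraps around once (cycled = 1, index = 0) to
 * cover the pages below the starting offset.
 */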
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	if (trace_android_fs_datawrite_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						 current->pid, path,
						 current->comm);
	}
	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages_all(sbi);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_android_fs_datawrite_end(inode, pos, len);
	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should have come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
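
/*
 * Alignment example for the check above, assuming a 4KiB block size
 * (blocksize_mask == 0xfff): offset 8192 with 4KiB-aligned iovecs passes,
 * while offset 4100 (4100 & 0xfff != 0) rejects the request with -EINVAL.
 * Separately, f2fs_direct_IO() returns 0 for __force_buffered_io() cases,
 * which makes the VFS fall back to buffered IO.
 */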
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_dataread_start(inode, offset,
						count, current->pid, path,
						current->comm);
	}
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, offset, count,
						 current->pid, path,
						 current->comm);
	}

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ))
		trace_android_fs_dataread_end(inode, offset, count);
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE))
		trace_android_fs_datawrite_end(inode, offset, count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}
/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving a mapping,
	 * however F2FS breaks this for maintaining dirty page counts when
	 * truncating pages. So here adjusting 'extra_count' makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};