/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
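/*
 * Check whether writeback of @page is guaranteed by a checkpoint:
 * meta/node inode pages, directory pages, and cold data pages are.
 */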
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
/* device unit number for iv sector */
#define PG_DUN(i, p)	\
	((((i)->i_ino & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
static inline bool f2fs_may_encrypt_bio(struct inode *inode,
					struct f2fs_io_info *fio)
{
#ifdef CONFIG_CRYPTO_DISKCIPHER
	if (fio && (fio->type != DATA || fio->encrypted_page))
		return false;

	return (f2fs_encrypted_file(inode) &&
			fscrypt_disk_encrypted(inode));
#else
	return false;
#endif
}
static inline bool f2fs_bio_disk_encrypted(unsigned int bi_opf)
{
	if (bi_opf & REQ_CRYPT)
		return true;
	else
		return false;
}
static bool f2fs_mergeable_bio(struct bio *bio, u64 dun, void *ci, bool bio_encrypted)
{
#ifdef CONFIG_CRYPTO_DISKCIPHER
	if (!bio)
		return true;

	/* if both of them are not encrypted, no further check is needed */
	if (!f2fs_bio_disk_encrypted(bio->bi_opf) && !bio_encrypted)
		return true;

#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
	if (bio->bi_aux_private == ci)
		return bio_end_dun(bio) == dun;
	else
		return false;
#else
	return bio->bi_aux_private == ci;
#endif
#else
	return true;
#endif
}
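/*
 * Read completion helper shared by bios with and without post-read
 * processing: marks each page uptodate (or errored), unlocks it, and
 * returns the post-read context, if any, to the mempool.
 */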
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}
static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}
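/*
 * bi_end_io for read bios.  Hardware-encrypted (diskcipher) bios skip
 * the software post-read steps; bios carrying a post-read context are
 * handed to the decryption workqueue before their pages are unlocked.
 */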
static void f2fs_read_end_io(struct bio *bio)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_status = BLK_STS_IOERR;
	}
#endif

	if (f2fs_bio_disk_encrypted(bio->bi_opf))
		goto end_io;

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
end_io:
	__read_end_io(bio);
}
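/*
 * bi_end_io for write bios: dummy pages used for IO-size alignment go
 * back to the mempool, writeback is ended on real pages, and checkpoint
 * waiters are woken once no F2FS_WB_CP_DATA pages remain in flight.
 */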
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Return the block device that holds @blk_addr; if @bio is given, also
 * point it at that device and sector.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);

	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}
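/*
 * Submit a bio, padding DATA/NODE write bios with dummy pages up to the
 * configured IO size so the device sees aligned requests.
 */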
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
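/* Submit and clear the bio currently cached in an f2fs_bio_info slot. */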
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (f2fs_bio_disk_encrypted(io->bio->bi_opf))
		bio_set_op_attrs(io->bio, fio->op, fio->op_flags | REQ_CRYPT);
	else
		bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}
/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;
	struct inode *inode = fio->page->mapping->host;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFAULT;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	if (f2fs_may_encrypt_bio(inode, fio))
		fscrypt_set_bio(inode, bio, PG_DUN(inode, fio->page));

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
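/*
 * Queue a page into the per-(type, temp) write bio, merging with the
 * cached bio only when the block address is contiguous, the op flags
 * match, the target device is the same, and the encryption context
 * allows it; otherwise the cached bio is submitted first.
 */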
int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	struct inode *inode;
	bool bio_encrypted;
	u64 dun;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_block_addr(fio, fio->old_blkaddr);
	verify_block_addr(fio, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
	inode = fio->page->mapping->host;
	dun = PG_DUN(inode, fio->page);
	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
	fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);

	if (!f2fs_mergeable_bio(io->bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted))
		__submit_merged_bio(io);

alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		if (bio_encrypted)
			fscrypt_set_bio(inode, io->bio, dun);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}
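/*
 * Allocate a read bio targeting @blkaddr and attach a post-read context
 * when software decryption will be needed after the read completes.
 */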
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
		return ERR_PTR(-EFAULT);

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	if (f2fs_encrypted_file(inode) && !fscrypt_disk_encrypted(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
		bio_set_op_attrs(bio, REQ_OP_READ,
			(f2fs_encrypted_inode(inode) ?
				REQ_NOENCRYPT : 0));
	}

	/* wait for the page to be moved by cleaning */
	f2fs_wait_on_block_writeback(sbi, blkaddr);

	return bio;
}
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (f2fs_may_encrypt_bio(inode, NULL))
		fscrypt_set_bio(inode, bio, PG_DUN(inode, page));

	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
713 int reserve_new_blocks(struct dnode_of_data
*dn
, blkcnt_t count
)
715 struct f2fs_sb_info
*sbi
= F2FS_I_SB(dn
->inode
);
721 if (unlikely(is_inode_flag_set(dn
->inode
, FI_NO_ALLOC
)))
723 if (unlikely((err
= inc_valid_block_count(sbi
, dn
->inode
, &count
))))
726 trace_f2fs_reserve_new_blocks(dn
->inode
, dn
->nid
,
727 dn
->ofs_in_node
, count
);
729 f2fs_wait_on_page_writeback(dn
->node_page
, NODE
, true);
731 for (; count
> 0; dn
->ofs_in_node
++) {
732 block_t blkaddr
= datablock_addr(dn
->inode
,
733 dn
->node_page
, dn
->ofs_in_node
);
734 if (blkaddr
== NULL_ADDR
) {
735 dn
->data_blkaddr
= NEW_ADDR
;
736 __set_data_blkaddr(dn
);
741 if (set_page_dirty(dn
->node_page
))
742 dn
->node_changed
= true;
746 /* Should keep dn->ofs_in_node unchanged */
747 int reserve_new_block(struct dnode_of_data
*dn
)
749 unsigned int ofs_in_node
= dn
->ofs_in_node
;
752 ret
= reserve_new_blocks(dn
, 1);
753 dn
->ofs_in_node
= ofs_in_node
;
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
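/*
 * Read one data page, consulting the extent cache first to avoid a
 * dnode lookup.  Holes return -ENOENT; NEW_ADDR pages are zero-filled.
 */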
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
*find_data_page(struct inode
*inode
, pgoff_t index
)
846 struct address_space
*mapping
= inode
->i_mapping
;
849 page
= find_get_page(mapping
, index
);
850 if (page
&& PageUptodate(page
))
852 f2fs_put_page(page
, 0);
854 page
= get_read_data_page(inode
, index
, 0, false);
858 if (PageUptodate(page
))
861 wait_on_page_locked(page
);
862 if (unlikely(!PageUptodate(page
))) {
863 f2fs_put_page(page
, 0);
864 return ERR_PTR(-EIO
);
/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
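/* Allocate a new data block (LFS-style) and update the dnode entry. */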
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O */
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	if (direct_io) {
		map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, WRITE) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}
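/*
 * PRE_AIO batched preallocation only takes the node_change read lock;
 * every other mapping flag takes f2fs_lock_op() to exclude checkpoint.
 */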
static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, 0, 0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
		err = -EFAULT;
		goto sync_out;
	}

	if (!is_valid_data_blkaddr(sbi, blkaddr)) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}
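/* Bridge f2fs_map_blocks() to the buffer_head-based get_block interface. */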
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
						flag, next_pgofs,
						NO_CHECK_TYPE);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL,
						rw_hint_to_seg_type(
							inode->i_write_hint));
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, inode->i_ino, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		get_node_info(sbi, xnid, &ni);

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;
	u64 dun;
	bool bio_encrypted;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}

			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
								DATA_GENERIC))
				goto set_error_page;
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}

		dun = PG_DUN(inode, page);
		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
		if (!f2fs_mergeable_bio(bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted)) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}

		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			if (f2fs_may_encrypt_bio(inode, NULL))
				fscrypt_set_bio(inode, bio, dun);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
*file
, struct page
*page
)
1663 struct inode
*inode
= page
->mapping
->host
;
1666 trace_f2fs_readpage(page
, DATA
);
1668 /* If the file has inline data, try to read it directly */
1669 if (f2fs_has_inline_data(inode
))
1670 ret
= f2fs_read_inline_data(inode
, page
);
1672 ret
= f2fs_mpage_readpages(page
->mapping
, NULL
, page
, 1);
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
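/*
 * Encrypt one data page for write, retrying with __GFP_NOFAIL after
 * flushing merged writes if bounce-page allocation hits ENOMEM.
 * Disk-encrypted (diskcipher) inodes need no software bounce page.
 */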
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	if (fscrypt_disk_encrypted(inode))
		return 0;

	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}
bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}

bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
	}
	return false;
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (should_update_outplace(inode, fio))
		return false;

	return should_update_inplace(inode, fio);
}
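/*
 * Write one data page, either in-place (IPU) when the policy allows and
 * a valid old block address exists, or out-of-place (OPU) on the LFS
 * write path.
 */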
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC))
			return -EFAULT;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
							DATA_GENERIC)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
								HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee that its
	 * mapping->host can detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}
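/*
 * Prepare for write_begin: look up (or reserve) the block address for
 * the page, converting inline data when the write goes past its limit.
 */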
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, flag, false);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	if (trace_android_fs_datawrite_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						 current->pid, path,
						 current->comm);
	}
	trace_f2fs_write_begin(inode, pos, len, flags);

	if (f2fs_is_atomic_file(inode) &&
			!available_free_memory(sbi, INMEM_PAGES)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		drop_inmem_pages_all(sbi);
	return err;
}
*file
,
2472 struct address_space
*mapping
,
2473 loff_t pos
, unsigned len
, unsigned copied
,
2474 struct page
*page
, void *fsdata
)
2476 struct inode
*inode
= page
->mapping
->host
;
2478 trace_android_fs_datawrite_end(inode
, pos
, len
);
2479 trace_f2fs_write_end(inode
, pos
, len
, copied
);
2482 * This should be come from len == PAGE_SIZE, and we expect copied
2483 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
2484 * let generic_perform_write() try to copy data again through copied=0.
2486 if (!PageUptodate(page
)) {
2487 if (unlikely(copied
!= len
))
2490 SetPageUptodate(page
);
2495 set_page_dirty(page
);
2497 if (pos
+ copied
> i_size_read(inode
))
2498 f2fs_i_size_write(inode
, pos
+ copied
);
2500 f2fs_put_page(page
, 1);
2501 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2505 static int check_direct_IO(struct inode
*inode
, struct iov_iter
*iter
,
2508 unsigned blocksize_mask
= inode
->i_sb
->s_blocksize
- 1;
2510 if (offset
& blocksize_mask
)
2513 if (iov_iter_alignment(iter
) & blocksize_mask
)
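/*
 * Direct IO entry point: falls back to buffered IO when forced (e.g. for
 * encrypted files on this kernel), serializes against dio_rwsem, and
 * trims write hints according to the whint mode.
 */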
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_dataread_start(inode, offset,
						count, current->pid, path,
						current->comm);
	}
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, offset, count,
						 current->pid, path,
						 current->comm);
	}

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}
out:
	if (trace_android_fs_dataread_start_enabled() &&
	    (rw == READ))
		trace_android_fs_dataread_end(inode, offset, count);
	if (trace_android_fs_datawrite_start_enabled() &&
	    (rw == WRITE))
		trace_android_fs_datawrite_end(inode, offset, count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
2690 #include <linux/migrate.h>
2692 int f2fs_migrate_page(struct address_space
*mapping
,
2693 struct page
*newpage
, struct page
*page
, enum migrate_mode mode
)
2695 int rc
, extra_count
;
2696 struct f2fs_inode_info
*fi
= F2FS_I(mapping
->host
);
2697 bool atomic_written
= IS_ATOMIC_WRITTEN_PAGE(page
);
2699 BUG_ON(PageWriteback(page
));
2701 /* migrating an atomic written page is safe with the inmem_lock hold */
2702 if (atomic_written
) {
2703 if (mode
!= MIGRATE_SYNC
)
2705 if (!mutex_trylock(&fi
->inmem_lock
))
2710 * A reference is expected if PagePrivate set when move mapping,
2711 * however F2FS breaks this for maintaining dirty page counts when
2712 * truncating pages. So here adjusting the 'extra_count' make it work.
2714 extra_count
= (atomic_written
? 1 : 0) - page_has_private(page
);
2715 rc
= migrate_page_move_mapping(mapping
, newpage
,
2716 page
, NULL
, mode
, extra_count
);
2717 if (rc
!= MIGRATEPAGE_SUCCESS
) {
2719 mutex_unlock(&fi
->inmem_lock
);
2723 if (atomic_written
) {
2724 struct inmem_pages
*cur
;
2725 list_for_each_entry(cur
, &fi
->inmem_pages
, list
)
2726 if (cur
->page
== page
) {
2727 cur
->page
= newpage
;
2730 mutex_unlock(&fi
->inmem_lock
);
2735 if (PagePrivate(page
))
2736 SetPagePrivate(newpage
);
2737 set_page_private(newpage
, page_private(page
));
2739 if (mode
!= MIGRATE_SYNC_NO_COPY
)
2740 migrate_page_copy(newpage
, page
);
2742 migrate_page_states(newpage
, page
);
2744 return MIGRATEPAGE_SUCCESS
;
2748 const struct address_space_operations f2fs_dblock_aops
= {
2749 .readpage
= f2fs_read_data_page
,
2750 .readpages
= f2fs_read_data_pages
,
2751 .writepage
= f2fs_write_data_page
,
2752 .writepages
= f2fs_write_data_pages
,
2753 .write_begin
= f2fs_write_begin
,
2754 .write_end
= f2fs_write_end
,
2755 .set_page_dirty
= f2fs_set_data_page_dirty
,
2756 .invalidatepage
= f2fs_invalidate_page
,
2757 .releasepage
= f2fs_release_page
,
2758 .direct_IO
= f2fs_direct_IO
,
2760 #ifdef CONFIG_MIGRATION
2761 .migratepage
= f2fs_migrate_page
,
2765 int __init
f2fs_init_post_read_processing(void)
2767 bio_post_read_ctx_cache
= KMEM_CACHE(bio_post_read_ctx
, 0);
2768 if (!bio_post_read_ctx_cache
)
2770 bio_post_read_ctx_pool
=
2771 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS
,
2772 bio_post_read_ctx_cache
);
2773 if (!bio_post_read_ctx_pool
)
2774 goto fail_free_cache
;
2778 kmem_cache_destroy(bio_post_read_ctx_cache
);
void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}