/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
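
/*
 * Completion callback for read bios: hand encrypted bios to fscrypt for
 * decryption, otherwise mark each page up-to-date (or drop the uptodate
 * bit on I/O error) and unlock it.
 */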
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
		bio->bi_error = -EIO;
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
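
/*
 * Completion callback for write bios: on error, flag the mapping with
 * AS_EIO and stop checkpointing; always end page writeback, and wake
 * the checkpoint waiter once the last in-flight write bio finishes.
 */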
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}
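
/*
 * For writes, track the number of in-flight bios; on host-managed SMR
 * storage, flush the current plug list first so DATA/NODE bios reach
 * the device in LBA order.
 */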
static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
			struct bio *bio, enum page_type type)
{
	if (!is_read_io(rw)) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(rw, bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	__submit_bio(io->sbi, fio->rw, io->bio, fio->type);

	io->bio = NULL;
}
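
/*
 * Check whether the bio pending in @io already carries a page belonging
 * to @inode, equal to @page, or holding node block @ino; with all three
 * unset, any pending bio counts as a match.
 */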
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
								int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	__submit_bio(fio->sbi, fio->rw, bio, fio->type);
	return 0;
}
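
/*
 * Merged-write path: append the page to the per-type open bio while the
 * block address stays contiguous and the rw flags match; otherwise
 * submit the pending bio and open a new one.
 */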
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
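
/*
 * Resolve the block address for @index: try the extent cache first and
 * fall back to reserving a new block through the dnode on a miss.
 */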
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists
 * or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
			((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}
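
/*
 * Preallocate the blocks an incoming write will touch: convert inline
 * data when necessary, then map the range in PRE_DIO or PRE_AIO mode so
 * the write path itself does not block on allocation.
 */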
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
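
/*
 * get_block_t bridge: run f2fs_map_blocks() and translate the result
 * into the buffer_head form expected by the generic block helpers.
 */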
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
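
/*
 * Allocate a read bio aimed at @blkaddr; for encrypted regular files
 * this also grabs an fscrypt context and waits for any writeback of the
 * encrypted page triggered by GC to finish.
 */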
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
	return 0;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
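
/*
 * Write one data page: look up its current block address, encrypt the
 * page when required, then either rewrite it in place (IPU) if SSR
 * favors that, or allocate a new location and append it (OPU).
 */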
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
}
/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			} else {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
						NULL, 0, DATA, WRITE);

	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}
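
/*
 * Work out the block address backing the page being written: full-page
 * overwrites of non-inline files need no lookup; inline data is either
 * served from the inode page or converted; holes are allocated under
 * f2fs_lock_op().
 */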
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_size)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	if (trace_android_fs_datawrite_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						current->pid, path,
						current->comm);
	}
	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}

		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, READ_SYNC, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_android_fs_datawrite_end(inode, pos, len);
	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to
	 * be PAGE_SIZE. Otherwise, we treat it with zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != PAGE_SIZE))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);
	clear_cold_data(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
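
/*
 * Direct I/O entry: encrypted regular files and LFS-mode mounts fall
 * back to buffered I/O by returning 0; otherwise the request runs
 * through blockdev_direct_IO() under the per-direction dio_rwsem.
 */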
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;

	if (trace_android_fs_dataread_start_enabled() &&
			(iov_iter_rw(iter) == READ)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_dataread_start(inode, offset,
						count, current->pid, path,
						current->comm);
	}
	if (trace_android_fs_datawrite_start_enabled() &&
			(iov_iter_rw(iter) == WRITE)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_datawrite_start(inode, offset, count,
						current->pid, path,
						current->comm);
	}

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	if (trace_android_fs_dataread_start_enabled() &&
			(iov_iter_rw(iter) == READ))
		trace_android_fs_dataread_end(inode, offset, count);
	if (trace_android_fs_datawrite_start_enabled() &&
			(iov_iter_rw(iter) == WRITE))
		trace_android_fs_datawrite_end(inode, offset, count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page; keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page; keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}
/*
 * This was copied from __set_page_dirty_buffers, which gives higher performance
 * in very high speed storages. (e.g., pmem)
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	memcg = mem_cgroup_begin_page_stat(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping, memcg);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	mem_cgroup_end_page_stat(memcg);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate is set when moving the
	 * mapping; however F2FS breaks this for maintaining dirty page counts
	 * when truncating pages. So here adjusting the 'extra_count' makes it
	 * work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};