/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
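
/*
 * Illustrative note (not from the original file): SECTOR_FROM_BLOCK()
 * converts an f2fs block address into a 512-byte sector number. Assuming
 * the usual 4KB f2fs block size, this is a left shift by 3, so block 100
 * starts at sector 800.
 */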
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only issued by the checkpoint procedure, and
		 * we must wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}
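
/*
 * Sketch of the checkpoint interaction (assuming the wait_io protocol
 * above): the checkpoint path submits a META_FLUSH bio and blocks in
 * wait_for_completion(); f2fs_write_end_io() then calls
 * complete(sbi->wait_io) once the flush reaches the device, so the
 * checkpoint cannot proceed before its metadata is durable.
 */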
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
/*
 * Fill the locked page with data located at the given block address.
 * Return the page, unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}
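
/*
 * Illustrative caller pattern (not from this file): reading one data
 * block synchronously into a locked page, as find_data_page() does below:
 *
 *	err = f2fs_submit_page_bio(sbi, page, blk_addr, READ_SYNC);
 *	if (!err)
 *		wait_on_page_locked(page);
 */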
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}
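
/*
 * Merging note: a page is appended to the in-flight bio only when its
 * block is physically contiguous with the previous one
 * (last_block_in_bio + 1 == blk_addr) and the rw flags match; any other
 * pattern flushes the pending bio first. For example, writing blocks
 * 100, 101, 102 produces a single three-block bio, while 100, 101, 200
 * produces two bios.
 */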
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
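
/*
 * Illustrative usage (assumed from callers such as get_new_data_page()
 * below): the caller holds f2fs_lock_op() around the reservation:
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	f2fs_lock_op(sbi);
 *	err = f2fs_reserve_block(&dn, index);
 *	f2fs_unlock_op(sbi);
 */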
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
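
/*
 * Worked example (hypothetical values): with a cached extent of
 * fofs = 10, len = 5, blk_addr = 1000, a lookup of pgofs = 12 hits and
 * maps to block 1000 + 12 - 10 = 1002, and b_size covers the remaining
 * three blocks (12..14), so readers can batch the request.
 */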
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}
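
/*
 * Policy sketch, as implemented above: the single-extent cache tries, in
 * order, to (1) restart an empty extent, (2) merge at the front or back
 * when both the file offset and the block address are adjacent, or
 * (3) split around an overwrite inside the extent, keeping the larger
 * half. Anything shorter than F2FS_MIN_EXTENT_LEN is then dropped, since
 * a tiny extent is not worth the bookkeeping.
 */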
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to be able to tell whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr may remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
					dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
						dn.data_blkaddr, READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t new_blkaddr;
	pgoff_t fofs;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use the extent cache, to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	dn->data_blkaddr = new_blkaddr;
	return 0;
}
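
/*
 * Arithmetic note: fofs is the file offset (in pages) of the block just
 * allocated, so ((fofs + 1) << PAGE_CACHE_SHIFT) is the byte size the
 * file must have to contain it; e.g. with 4KB pages, fofs 2 pushes
 * i_size to at least 12288 bytes.
 */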
/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
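
/*
 * Illustrative walk-through (hypothetical numbers): a request for four
 * blocks at iblock 0 first maps block 0 into bh_result, then keeps
 * growing b_size while each next block address equals b_blocknr + ofs,
 * so four consecutive blocks come back as one mapped buffer_head and
 * mpage/direct IO can issue a single large bio.
 */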
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page was already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
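
/*
 * Design note, as implemented above: under SSR pressure f2fs prefers an
 * in-place update (rewrite_data_page() to the old address, marked
 * FI_UPDATE_WRITE) to avoid consuming new segments, while the normal
 * path does an out-of-place log write (write_data_page() to a new
 * address, marked FI_APPEND_WRITE), in keeping with the log-structured
 * design.
 */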
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len, NULL);
	if (err)
		goto fail;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		f2fs_unlock_op(sbi);
		f2fs_put_page(page, 0);
		goto repeat;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_dnode(&dn);
		goto unlock_fail;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		goto out;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				goto fail;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				goto fail;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 0);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* Let buffered I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	/* this invalidation only applies to whole-page ranges */
	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		invalidate_inmem_page(inode, page);

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}
static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};