/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

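/*
 * Note: the end_io handlers below use the pre-3.14 bvec-walk idiom: they
 * start at the last filled bio_vec and iterate backwards, prefetching the
 * next page's flags word before touching it. Newer kernels would use
 * bio_for_each_segment_all() instead.
 */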
static void f2fs_read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			ClearPageUptodate(page);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}

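/*
 * On a failed write, f2fs_write_end_io() tags the page's mapping with
 * AS_EIO so a later fsync() sees the error, marks the checkpoint with
 * CP_ERROR_FLAG, and forces the filesystem read-only. Once the last
 * writeback page is accounted for, any checkpoint waiter on cp_wait is
 * woken up.
 */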
static void f2fs_write_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct f2fs_sb_info *sbi = F2FS_SB(bvec->bv_page->mapping->host->i_sb);

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (unlikely(!uptodate)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (bio->bi_private)
		complete(bio->bi_private);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

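/*
 * __submit_merged_bio() flushes the bio cached in a f2fs_bio_info; the
 * caller must hold io->io_mutex. For META_FLUSH, the submission waits on
 * an on-stack completion that f2fs_write_end_io() completes through
 * bio->bi_private, so the metadata is on disk before we return.
 */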
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw | fio->rw_flag;

	if (is_read_io(rw)) {
		submit_bio(rw, io->bio);
		trace_f2fs_submit_read_bio(io->sbi->sb, rw, fio->type, io->bio);
	} else {
		/*
		 * META_FLUSH is only from the checkpoint procedure, and we
		 * should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->bio->bi_private = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
		trace_f2fs_submit_write_bio(io->sbi->sb, rw, fio->type, io->bio);
	}
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

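/*
 * f2fs_submit_page_mbio() is the merging counterpart of the above: the
 * page is appended to the bio cached for its page type as long as its
 * block address is contiguous with the previous one and the rw mode
 * matches; otherwise the cached bio is submitted first and a new one is
 * allocated.
 */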
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct block_device *bdev = sbi->sb->s_bdev;
	struct f2fs_bio_info *io;
	int bio_blocks;

	io = is_read_io(fio->rw) ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read_io(fio->rw))
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		io->bio = __bio_alloc(bdev, bio_blocks);
		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		io->bio->bi_end_io = is_read_io(fio->rw) ? f2fs_read_end_io :
							f2fs_write_end_io;
		io->fio = *fio;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;
	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

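/*
 * f2fs keeps one cached extent per inode: a run of consecutive file
 * offsets [fofs, fofs + len) mapped to consecutive block addresses
 * starting at blk_addr. check_extent_cache() serves get_data_block_ro()
 * from that extent, mapping bh_result directly and skipping the node
 * lookup on a hit.
 */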
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

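/*
 * update_extent_cache() keeps the cached extent consistent with a newly
 * assigned block address. In order, it handles: dropping a fully-matched
 * single-block extent, initializing an empty extent, front/back merging
 * an adjacent block, and splitting on an overwrite in the middle (the
 * larger half survives). An extent shorter than F2FS_MIN_EXTENT_LEN is
 * dropped and the inode opts out of caching via FI_NO_EXTENT.
 */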
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initialize the extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

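/*
 * find_data_page() returns the data page without holding its lock. With
 * sync, the block is read with READ_SYNC and we wait for the page to
 * become uptodate; otherwise only readahead (READA) is issued and the
 * page is returned as-is.
 */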
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 * Note that, npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

/*
 * This function should be used by the data read flow only where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (((size_t)i) << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

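/*
 * do_write_data_page() picks between the two write policies: when the
 * block already has an on-disk address, the data is not cold, and the
 * segment allocator needs SSR, the page is rewritten in place; otherwise
 * it is written out of place and the extent cache is updated with the
 * new block address.
 */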
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

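/*
 * f2fs_write_data_page() skips pages wholly beyond i_size, zeroes the
 * tail of a partially valid last page before writing it, and redirties
 * the page via AOP_WRITEPAGE_ACTIVATE while power-off recovery
 * (por_doing) is in progress.
 */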
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page, &fio);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

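/*
 * Writeback batching: when the flusher hands us a small nr_to_write,
 * raise it to MAX_DESIRED_PAGES_WP so more pages can be merged per pass,
 * then subtract the excess again before returning.
 */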
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

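/*
 * f2fs_write_begin() reserves the block mapping under f2fs_lock_op()
 * first, then brings the page uptodate: a full-page write needs no read,
 * a write entirely beyond i_size only needs the surrounding bytes zeroed,
 * and anything else reads the old block synchronously before the copy-in.
 */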
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

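/*
 * Direct writes are declined (a 0 return makes the VFS fall back to the
 * buffered path); only direct reads are handed to blockdev_direct_IO(),
 * using the read-only block mapping above.
 */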
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};