/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

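/*
 * Reserve a new data block for this dnode and record it as NEW_ADDR in the
 * node page.  Returns -EPERM if the inode forbids allocation (FI_NO_ALLOC)
 * and -ENOSPC if no valid block can be accounted.
 */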
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

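/*
 * Look up @pgofs in the inode's single in-memory extent.  On a hit, map
 * bh_result to the cached block address and return 1; otherwise return 0
 * so that the caller falls back to a node page lookup.
 */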
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

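/*
 * Record the new block address in the node page, then try to keep the
 * inode's one-extent cache valid: start a new extent, merge at the front
 * or back, or split the extent when the update lands in the middle.
 */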
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
	return;
}

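/*
 * Find the data page without locking it for the caller.  Returns the
 * cached page when it is uptodate, an ERR_PTR for a hole (-ENOENT) or a
 * block preallocated by fallocate() (-EINVAL), and otherwise reads the
 * block from disk.
 */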
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR but no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If this function tries to access a hole, it returns an error, because
 * the callers (functions in dir.c and GC) need to know whether the page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a mutex by calling mutex_lock_op()
 * and mutex_unlock_op().
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

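/*
 * bio completion callback for reads: walk the bio_vec array backwards,
 * marking each page uptodate (or failed) and unlocking it, then free the
 * private data and the bio itself.
 */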
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Return an unlocked page; it is unlocked by the bio completion handler.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, so it does not
 * check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the readahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

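/*
 * Write one dirty data page: look up its current block address and either
 * rewrite it in place (when SSR is in effect) or allocate a new block and
 * update the extent cache.
 */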
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

485
486static int f2fs_write_data_page(struct page *page,
487 struct writeback_control *wbc)
488{
489 struct inode *inode = page->mapping->host;
490 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
491 loff_t i_size = i_size_read(inode);
492 const pgoff_t end_index = ((unsigned long long) i_size)
493 >> PAGE_CACHE_SHIFT;
494 unsigned offset;
39936837 495 bool need_balance_fs = false;
eb47b800
JK
496 int err = 0;
497
498 if (page->index < end_index)
39936837 499 goto write;
eb47b800
JK
500
501 /*
502 * If the offset is out-of-range of file size,
503 * this page does not have to be written to disk.
504 */
505 offset = i_size & (PAGE_CACHE_SIZE - 1);
506 if ((page->index >= end_index + 1) || !offset) {
507 if (S_ISDIR(inode->i_mode)) {
508 dec_page_count(sbi, F2FS_DIRTY_DENTS);
509 inode_dec_dirty_dents(inode);
510 }
39936837 511 goto out;
eb47b800
JK
512 }
513
514 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
39936837
JK
515write:
516 if (sbi->por_doing) {
517 err = AOP_WRITEPAGE_ACTIVATE;
eb47b800 518 goto redirty_out;
39936837 519 }
eb47b800 520
39936837 521 /* Dentry blocks are controlled by checkpoint */
eb47b800
JK
522 if (S_ISDIR(inode->i_mode)) {
523 dec_page_count(sbi, F2FS_DIRTY_DENTS);
524 inode_dec_dirty_dents(inode);
39936837
JK
525 err = do_write_data_page(page);
526 } else {
527 int ilock = mutex_lock_op(sbi);
528 err = do_write_data_page(page);
529 mutex_unlock_op(sbi, ilock);
530 need_balance_fs = true;
eb47b800 531 }
39936837
JK
532 if (err == -ENOENT)
533 goto out;
534 else if (err)
535 goto redirty_out;
eb47b800
JK
536
537 if (wbc->for_reclaim)
538 f2fs_submit_bio(sbi, DATA, true);
539
eb47b800 540 clear_cold_data(page);
39936837 541out:
eb47b800 542 unlock_page(page);
39936837 543 if (need_balance_fs)
eb47b800
JK
544 f2fs_balance_fs(sbi);
545 return 0;
546
eb47b800
JK
547redirty_out:
548 wbc->pages_skipped++;
549 set_page_dirty(page);
39936837 550 return err;
eb47b800
JK
551}
552
553#define MAX_DESIRED_PAGES_WP 4096
554
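/* Callback for write_cache_pages(): write one page and record any error */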
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

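/*
 * ->writepages implementation: raise a small nr_to_write budget up to
 * MAX_DESIRED_PAGES_WP so that each call batches more pages, then give
 * the excess back before returning.
 */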
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode))
		mutex_lock(&sbi->writepages);
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (!S_ISDIR(inode->i_mode))
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

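/*
 * ->write_begin implementation: reserve the data block under the fs lock,
 * then prepare the page contents.  A write covering the whole page needs
 * no read; otherwise zero-fill past i_size or read the old block in.
 */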
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

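/*
 * Direct I/O is only supported for reads here; a direct write returns 0
 * so that the write is redone through the buffered path.
 */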
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};