f2fs: activate f2fs_trace_pid
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

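/*
 * Completion callback for read bios built by the merged-IO path below.
 * Each page that was stitched into the bio is marked uptodate (or flagged
 * with an error) and then unlocked, so readers sleeping in lock_page() or
 * wait_on_page_locked() can proceed.
 */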
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

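/*
 * Completion callback for write bios.  On error the page is re-dirtied,
 * AS_EIO is set on its mapping, and further checkpointing is stopped via
 * f2fs_stop_checkpoint().  Once the last F2FS_WRITEBACK page drains, any
 * waiter on sbi->cp_wait (the checkpoint path) is woken up.
 */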
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

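/*
 * Flush the bio currently being merged in @io, if any.  The read/write
 * tracepoint is emitted just before submit_bio() so the merged request can
 * be correlated with the pages added by f2fs_submit_page_mbio().
 */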
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio->rw,
							fio->type, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio->rw,
							fio->type, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

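/*
 * Flush a per-type merged bio from outside the merging path, e.g. before a
 * checkpoint or when writeback must reach the disk now.  META_FLUSH
 * requests are issued as (FUA) flush writes unless the "nobarrier" mount
 * option is set.
 */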
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

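/*
 * Merged-IO submission: append @page to the per-type bio kept in @sbi and
 * only submit when the block addresses stop being contiguous, the rw flags
 * change, or the bio is full.  Compared with f2fs_submit_page_bio() above,
 * this batches many pages into one request.
 */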
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, fio->blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

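/*
 * A single in-inode extent (fofs, blk_addr, len) caches one run of
 * contiguous blocks for this inode.  If @pgofs falls inside it, map
 * @bh_result directly and report a hit; otherwise the caller has to walk
 * the node page via get_dnode_of_data().
 */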
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

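/*
 * Keep the single cached extent in sync with a new block address: start a
 * fresh extent, grow it by front/back merge, or split it when a block in
 * the middle is remapped.  An extent that shrinks below F2FS_MIN_EXTENT_LEN
 * is dropped and FI_NO_EXTENT disables the cache for this inode.
 */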
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}

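/*
 * Look up a data page without keeping it locked.  With @sync the read is
 * issued READ_SYNC and waited for; otherwise it is only a READA-style
 * readahead hint and the page may not yet be uptodate when it is returned.
 */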
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

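/*
 * Reserve and allocate an on-disk block in one step for the block-mapping
 * (direct I/O) path: account the new valid block, pick a block in
 * CURSEG_WARM_DATA, point the dnode at it, and push i_size forward if the
 * write extends the file.  The extent cache is bypassed (FI_NO_EXTENT is
 * set temporarily) around the address update.
 */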
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	pgoff_t fofs;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

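/*
 * ->readpage / ->readpages: inline-data files are served straight from the
 * inode page where possible; everything else goes through the generic mpage
 * helpers with get_data_block() as the block mapper.
 */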
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

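/*
 * Write one dirty data page.  If the allocator prefers SSR and the block
 * already exists on disk, the page is rewritten in place; otherwise it is
 * written out-of-place to a newly allocated block and the node page plus
 * extent cache are updated to the new address.
 */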
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(fio->blk_addr, &dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

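/*
 * ->writepage: zero the tail of a partially used last page, write dentry
 * pages under the checkpoint's control, and redirty pages while recovery is
 * running or free sections run short.  AOP_WRITEPAGE_ACTIVATE asks the VM
 * to re-activate pages that could not be written right now.
 */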
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

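/*
 * ->write_begin: convert inline data when the write can no longer fit in
 * the inode (taking page #0 before the inode page to respect the locking
 * rule documented below), reserve the block, and pre-read or zero the page
 * so that a partial overwrite sees valid data.
 */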
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

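/*
 * Direct I/O helpers.  Writes must be block-aligned, both in file offset
 * and in the user buffer; misaligned requests make f2fs_direct_IO() below
 * return 0 so the caller falls back to buffered I/O.
 */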
static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

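/*
 * ->set_page_dirty: pages of an atomic-write file are not dirtied in the
 * normal way; they are queued via register_inmem_page() and written back
 * later when the atomic write is committed, so partial updates do not hit
 * the disk prematurely.
 */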
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage = f2fs_read_data_page,
	.readpages = f2fs_read_data_pages,
	.writepage = f2fs_write_data_page,
	.writepages = f2fs_write_data_pages,
	.write_begin = f2fs_write_begin,
	.write_end = f2fs_write_end,
	.set_page_dirty = f2fs_set_data_page_dirty,
	.invalidatepage = f2fs_invalidate_data_page,
	.releasepage = f2fs_release_data_page,
	.direct_IO = f2fs_direct_IO,
	.bmap = f2fs_bmap,
};