/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>

static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_error = -EIO;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_error))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Map a block address to its backing device; if a bio is given, also set
 * the bio's bdev and starting sector to match.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

/* Return true if the bio's bdev is the same as blk_addr's target device. */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}
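/*
 * Worked example (hypothetical two-device layout): if FDEV(0) spans blocks
 * 0x0000..0x1fff and FDEV(1) spans 0x2000..0x3fff, then blk_addr 0x2100
 * resolves to FDEV(1).bdev with an in-device offset of 0x100 blocks, so a
 * bio passed in gets bi_sector = SECTOR_FROM_BLOCK(0x100).
 */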

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
				current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio_op(bio), bio);
}
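/*
 * Worked example for the dummy-page padding above (assuming
 * F2FS_IO_SIZE(sbi) == 8 blocks): a write bio holding 13 pages leaves
 * start == 13 % 8 == 5, so three zeroed dummy pages are appended to pad
 * the bio to a multiple of the IO size before submission;
 * f2fs_write_end_io() later returns them to the write_io_dummy mempool.
 */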

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, ino, idx);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= WRITE_FLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
		return;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {

		__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

341
342void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
c1286ff4 343{
13f00235 344 __submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
c1286ff4
JK
345}
346
13f00235
JK
347void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
348 struct inode *inode, nid_t ino, pgoff_t idx,
349 enum page_type type)
c1286ff4 350{
13f00235 351 __submit_merged_write_cond(sbi, inode, ino, idx, type, false);
c1286ff4
JK
352}
353
13f00235 354void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
c1286ff4 355{
13f00235
JK
356 f2fs_submit_merged_write(sbi, DATA);
357 f2fs_submit_merged_write(sbi, NODE);
358 f2fs_submit_merged_write(sbi, META);
c1286ff4
JK
359}
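/*
 * Usage sketch for the merged-write API above: an fsync-style path flushes
 * only the bios that may hold its pages, e.g.
 *
 *	f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
 *
 * while a full flush calls f2fs_flush_merged_writes(sbi) to push all
 * pending DATA/NODE/META bios unconditionally.
 */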

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);

	if (!is_read_io(fio->op))
		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
	return 0;
}
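/*
 * Caller-side sketch (hypothetical read of one block; unlisted fields are
 * zero):
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = F2FS_I_SB(inode),
 *		.type = DATA,
 *		.op = REQ_OP_READ,
 *		.old_blkaddr = blkaddr,
 *		.new_blkaddr = blkaddr,
 *		.page = page,
 *		.encrypted_page = NULL,
 *	};
 *	err = f2fs_submit_page_bio(&fio);
 *
 * The page remains locked until f2fs_read_end_io() unlocks it.
 */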

int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	int err = 0;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out_fail;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = 1 as a return value */
	fio->submitted = 1;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, false);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);

	if (fio->in_list)
		goto next;
out_fail:
	up_write(&io->io_rwsem);
	return err;
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_file(inode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	return bio;
}

/* This read path also handles encrypted files */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, REQ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, REQ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
					&sum, CURSEG_WARM_DATA, NULL, false);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return (f2fs_encrypted_file(inode) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
			F2FS_GET_BLOCK_PRE_AIO :
			F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return err;
}
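/*
 * Worked example (assuming 4KB blocks and F2FS_BLK_ALIGN rounding up to
 * block units): a write of 10000 bytes at ki_pos == 6000 gives
 * m_lblk = F2FS_BLK_ALIGN(6000) = 2 and
 * m_len = F2FS_BYTES_TO_BLK(16000) - 2 = 1, i.e. only block 2, the one
 * block fully covered by the write, is preallocated here.
 */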

static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, 0, 0};
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
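/*
 * Caller-side sketch (hypothetical read-side lookup):
 *
 *	struct f2fs_map_blocks map;
 *
 *	map.m_lblk = index;
 *	map.m_len = nr_blocks;
 *	map.m_next_pgofs = NULL;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;	/* m_len blocks are contiguous */
 *
 * On success, m_pblk/m_len describe the longest contiguous extent starting
 * at m_lblk, and *m_next_pgofs (when non-NULL) is set to the next offset
 * worth probing after a hole.
 */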

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DEFAULT, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
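/*
 * E.g. with 4KB blocks (i_blkbits == 12), logical_to_blk(inode, 8192) == 2
 * and blk_to_logical(inode, 2) == 8192.
 */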

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (!IS_ERR(fio->encrypted_page))
		return 0;

	/* flush pending IOs and wait for a while in the ENOMEM case */
	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
		f2fs_flush_merged_writes(fio->sbi);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		gfp_flags |= __GFP_NOFAIL;
		goto retry_encrypt;
	}
	return PTR_ERR(fio->encrypted_page);
}

static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
		return false;
	if (is_cold_data(fio->page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
		return false;

	return need_inplace_update_policy(inode, fio);
}

static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
{
	if (fio->old_blkaddr == NEW_ADDR)
		return false;
	if (fio->old_blkaddr == NULL_ADDR)
		return false;
	return true;
}
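/*
 * Summary of the write-path decision below: when the page already has an
 * on-disk block (valid_ipu_blkaddr()) and policy allows it
 * (need_inplace_update()), do_write_data_page() rewrites the block in
 * place via rewrite_data_page(); otherwise it allocates a new block and
 * writes out-of-place, LFS-style, via write_data_page().
 */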

int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0, 0, 0};
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (valid_ipu_blkaddr(fio)) {
			ipu_force = true;
			fio->need_lock = LOCK_DONE;
			goto got_it;
		}
	}

	/* avoid deadlock between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}
got_it:
	/*
	 * If the current allocation needs SSR, in-place writes are
	 * preferred for updated data.
	 */
	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = rewrite_data_page(fio);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);

	/* LFS mode write path */
	write_data_page(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}

static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
	};

	trace_f2fs_writepage(page, DATA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = do_write_data_page(&fio);
		}
	}
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			/* give a priority to WB_SYNC threads */
			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
					--wbc->nr_to_write <= 0) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA);

	return ret;
}

int __f2fs_write_data_pages(struct address_space *mapping,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_mmap_sem);
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
		up_write(&F2FS_I(inode)->i_mmap_sem);
	}
}

static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0, 0, 0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	if (trace_android_fs_datawrite_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						current->pid, path,
						current->comm);
	}
	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin(), to avoid a deadlock due
	 * to wait_for_stable_page. We will wait on it below, under our own
	 * IO control.
	 */
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

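/*
 * Illustration (not part of this file): the need_balance path above uses the
 * standard drop-lock/revalidate pattern. f2fs_balance_fs() may sleep, so the
 * page lock is released around it; in that window the page can be truncated,
 * which is detected by re-checking page->mapping after relocking and, if it
 * changed, restarting from the page-cache lookup. A bare-bones sketch of the
 * control flow (all names here are hypothetical stubs):
 */
struct demo_page { void *mapping; };

static void demo_lock_page(struct demo_page *p)   { (void)p; /* lock_page() */ }
static void demo_unlock_page(struct demo_page *p) { (void)p; /* unlock_page() */ }
static struct demo_page *demo_grab_page(void *mapping)
{
	(void)mapping;
	return 0;	/* stands in for grab_cache_page(); returns page locked */
}

static struct demo_page *demo_write_begin(void *mapping, int need_balance)
{
	struct demo_page *page;

repeat:
	page = demo_grab_page(mapping);
	if (!page)
		return 0;

	if (need_balance) {
		demo_unlock_page(page);
		/* ... long, sleeping operation (f2fs_balance_fs) ... */
		demo_lock_page(page);
		if (page->mapping != mapping)
			goto repeat;	/* truncated from under us; the real
					 * code also drops its page ref here */
	}
	return page;
}
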
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_android_fs_datawrite_end(inode, pos, len);
	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should have come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and let
	 * generic_perform_write() try to copy the data again through
	 * copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

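/*
 * Illustration (not part of this file): the copied handling above relies on
 * the generic_perform_write() retry contract: when ->write_end() returns 0,
 * the caller faults the user buffer back in and retries the same segment.
 * Reporting a short copy into a !Uptodate page as "0 bytes" is therefore
 * safe, while reporting a partial count would expose an undefined page
 * region. A rough standalone sketch of that caller loop (hypothetical
 * names; demo_copy stands in for the user-copy that may come up short):
 */
#include <stdbool.h>

static unsigned demo_copy(unsigned want, bool retried)
{
	return retried ? want : want / 2;	/* first attempt copies short */
}

static unsigned long demo_perform_write(unsigned long len, unsigned chunk)
{
	unsigned long written = 0;
	bool retried = false;

	while (written < len) {
		unsigned want = (len - written < chunk) ?
					(unsigned)(len - written) : chunk;
		unsigned copied = demo_copy(want, retried);

		/* ->write_end() reports 0 for an unusable short copy */
		if (copied != want)
			copied = 0;

		if (!copied) {		/* fault the buffer back in, retry */
			retried = true;
			continue;
		}
		retried = false;
		written += copied;
	}
	return written;
}
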
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

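/*
 * Illustration (not part of this file): with a 4 KiB block size,
 * blocksize_mask is 0xfff, so "offset & blocksize_mask" computes
 * "offset % 4096" with a single AND (valid because the block size is a
 * power of two). For example:
 *
 *   offset = 0x3000 (12288): 0x3000 & 0xfff == 0x000  -> aligned, accepted
 *   offset = 0x3200 (12800): 0x3200 & 0xfff == 0x200  -> misaligned, -EINVAL
 *
 * iov_iter_alignment() folds together the addresses and lengths of every
 * segment of the user buffer, so the same mask also rejects any segment that
 * does not start and end on a block boundary. A hypothetical standalone
 * equivalent of the offset check:
 */
static int demo_check_alignment(unsigned long long offset)
{
	const unsigned demo_blocksize_mask = 4096 - 1; /* assuming 4 KiB blocks */

	return (offset & demo_blocksize_mask) ? -1 /* -EINVAL */ : 0;
}
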
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	if (trace_android_fs_dataread_start_enabled() &&
			(iov_iter_rw(iter) == READ)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_dataread_start(inode, offset,
						count, current->pid, path,
						current->comm);
	}
	if (trace_android_fs_datawrite_start_enabled() &&
			(iov_iter_rw(iter) == WRITE)) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						inode);
		trace_android_fs_datawrite_start(inode, offset, count,
						current->pid, path,
						current->comm);
	}
	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

	if (trace_android_fs_dataread_start_enabled() &&
			(iov_iter_rw(iter) == READ))
		trace_android_fs_dataread_end(inode, offset, count);
	if (trace_android_fs_datawrite_start_enabled() &&
			(iov_iter_rw(iter) == WRITE))
		trace_android_fs_datawrite_end(inode, offset, count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic-written page; keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return drop_inmem_page(inode, page);

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic-written page; keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	memcg = mem_cgroup_begin_page_stat(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping, memcg);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	mem_cgroup_end_page_stat(memcg);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has been registered previously, so just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic-written page is safe with the inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/*
	 * A reference is expected if PagePrivate is set when moving the
	 * mapping; however, F2FS breaks this rule to maintain dirty page
	 * counts when truncating pages. Adjusting 'extra_count' here makes
	 * it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

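/*
 * Illustration (not part of this file): the extra_count adjustment in
 * f2fs_migrate_page() above only corrects reference-count bookkeeping.
 * The generic migration core expects roughly
 *
 *     page_count(page) == base_refs + page_has_private(page) + extra_count
 *
 * where base_refs covers the page-cache reference and the migration caller.
 * F2FS sets PagePrivate without taking the usual extra reference, so that
 * term is subtracted back out; an atomic-written page, on the other hand, is
 * pinned by the inmem_pages list, so one reference is added for it. The net
 * adjustment, as a hypothetical helper mirroring the expression above:
 */
static int demo_extra_count(int atomic_written, int has_private)
{
	/* cancel the implied PagePrivate ref, count the inmem-list pin */
	return (atomic_written ? 1 : 0) - has_private;
}
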
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};