f2fs: introduce f2fs_map_bh to clean codes of check_extent_cache
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
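
/*
 * Note on the error path above: a failed write re-dirties the page,
 * latches AS_EIO on its mapping, and calls f2fs_stop_checkpoint() so the
 * failure cannot be silently committed by a later checkpoint; waiters on
 * sbi->cp_wait are only woken once the F2FS_WRITEBACK count drains to zero.
 */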

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
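
/*
 * Sizing note (assuming the common 4KB f2fs block on a 512-byte-sector
 * device): SECTOR_FROM_BLOCK() shifts the block address left by 3, so
 * blk_addr 100 starts at sector 800. The end_io callback is chosen by
 * direction, so a bio built here never mixes read and write completion.
 */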

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}
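
/*
 * Illustration of the merge rule above (hypothetical numbers): with
 * io->last_block_in_bio == 100, a page at fio->blk_addr == 101 carrying
 * the same rw flags is appended to the pending bio; a page at block 200,
 * or one with different rw flags, first triggers __submit_merged_bio()
 * and then opens a fresh bio at 200 under "alloc_new".
 */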

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	__set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
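
/*
 * A minimal usage sketch (mirroring get_new_data_page() below):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_reserve_block(&dn, index);
 *	if (err)
 *		return ERR_PTR(err);
 *
 * On success dn.data_blkaddr is valid (possibly NEW_ADDR); since no inode
 * page was passed in, f2fs_reserve_block() has already put the dnode.
 */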

static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
			struct extent_info *ei, struct buffer_head *bh_result)
{
	unsigned int blkbits = sb->s_blocksize_bits;
	size_t count;

	set_buffer_new(bh_result);
	map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
	count = ei->fofs + ei->len - pgofs;
	if (count < (UINT_MAX >> blkbits))
		bh_result->b_size = (count << blkbits);
	else
		bh_result->b_size = UINT_MAX;
}
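
/*
 * Worked example with hypothetical values: for a cached extent
 * {fofs = 10, blk = 1000, len = 8} and a lookup at pgofs = 12, the
 * buffer_head maps to block 1002 and b_size spans the remaining
 * 6 blocks (ei->fofs + ei->len - pgofs), capped at UINT_MAX bytes.
 */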

static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return 1;
	}
	read_unlock(&fi->ext_lock);
	return 0;
}
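
/*
 * Only one extent is cached per inode, so a lookup is a single range
 * check: with fi->ext = {fofs = 10, blk = 1000, len = 8}, pgofs 12 hits
 * (the extent is copied out through *ei) while pgofs 20 misses and the
 * caller falls back to the node page lookup.
 */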

void update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (dn->data_blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = dn->data_blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}
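
/*
 * Merge/split behaviour above, with illustrative numbers: starting from
 * fi->ext = {fofs = 10, blk = 1000, len = 8}, writing fofs 9 at blk 999
 * front-merges to {9, 999, 9}; writing fofs 18 at blk 1008 back-merges
 * to {10, 1000, 9}; an update inside the extent keeps the larger half,
 * and any result shorter than F2FS_MIN_EXTENT_LEN drops the cache and
 * sets FI_NO_EXTENT.
 */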

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has NEW_ADDR, but no cached page */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr stays NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	__set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}
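
/*
 * The i_size update above, by example: allocating the block for file
 * offset index fofs = 3 with 4KB pages pushes i_size to at least
 * (3 + 1) << PAGE_CACHE_SHIFT = 16384 bytes, i.e. the end of the newly
 * allocated block.
 */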

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			if (dn.data_blkaddr == NULL_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, &ei)) {
		f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
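
/*
 * Mapping loop above, by example (hypothetical blocks): a request for
 * maxblocks = 4 at pgofs 10 that finds blocks 500, 501, 502 consecutive
 * and then a discontinuity returns bh_result mapped at 500 with b_size
 * of 3 blocks; mpage/direct-IO callers simply re-issue at pgofs 13 for
 * the remainder.
 */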

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, fio);
		update_extent_cache(&dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
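
/*
 * Example with a 4KB block size: a direct write at offset 8192 from
 * 4KB-aligned iovecs passes; offset 512, or a misaligned user buffer
 * (caught via iov_iter_alignment()), returns -EINVAL. Reads always
 * pass through unchecked.
 */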

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};