Commit | Line | Data |
---|---|---|
0a8165d7 | 1 | /*
eb47b800 JK | 2 | * fs/f2fs/data.c
| 3 | *
| 4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd.
| 5 | * http://www.samsung.com/
| 6 | *
| 7 | * This program is free software; you can redistribute it and/or modify
| 8 | * it under the terms of the GNU General Public License version 2 as
| 9 | * published by the Free Software Foundation.
| 10 | */
| 11 | #include <linux/fs.h>
| 12 | #include <linux/f2fs_fs.h>
| 13 | #include <linux/buffer_head.h>
| 14 | #include <linux/mpage.h>
| 15 | #include <linux/writeback.h>
| 16 | #include <linux/backing-dev.h>
8f46dcae | 17 | #include <linux/pagevec.h>
eb47b800 JK | 18 | #include <linux/blkdev.h>
| 19 | #include <linux/bio.h>
690e4a3e | 20 | #include <linux/prefetch.h>
e2e40f2c | 21 | #include <linux/uio.h>
c1286ff4 JK | 22 | #include <linux/mm.h>
| 23 | #include <linux/memcontrol.h>
f1e88660 | 24 | #include <linux/cleancache.h>
eb47b800 JK | 25 |
| 26 | #include "f2fs.h"
| 27 | #include "node.h"
| 28 | #include "segment.h"
db9f7c1a | 29 | #include "trace.h"
848753aa | 30 | #include <trace/events/f2fs.h>
32cbbe59 | 31 | #include <trace/events/android_fs.h>
eb47b800 | 32 |
4246a0b6 | 33 | static void f2fs_read_end_io(struct bio *bio)
93dfe2ac | 34 | {
f568849e LT | 35 | struct bio_vec *bvec;
| 36 | int i;
93dfe2ac | 37 |
c1286ff4 JK | 38 | #ifdef CONFIG_F2FS_FAULT_INJECTION
| 39 | if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
| 40 | bio->bi_error = -EIO;
| 41 | #endif
| 42 |
4375a336 | 43 | if (f2fs_bio_encrypted(bio)) {
4246a0b6 | 44 | if (bio->bi_error) {
c1286ff4 | 45 | fscrypt_release_ctx(bio->bi_private);
4375a336 | 46 | } else {
c1286ff4 | 47 | fscrypt_decrypt_bio_pages(bio->bi_private, bio);
4375a336 JK | 48 | return;
| 49 | }
| 50 | }
| 51 |
12377024 CY | 52 | bio_for_each_segment_all(bvec, bio, i) {
| 53 | struct page *page = bvec->bv_page;
f1e88660 | 54 |
4246a0b6 | 55 | if (!bio->bi_error) {
c1286ff4 JK | 56 | if (!PageUptodate(page))
| 57 | SetPageUptodate(page);
f1e88660 JK | 58 | } else {
| 59 | ClearPageUptodate(page);
| 60 | SetPageError(page);
| 61 | }
| 62 | unlock_page(page);
| 63 | }
f1e88660 JK | 64 | bio_put(bio);
| 65 | }
| 66 |
4246a0b6 | 67 | static void f2fs_write_end_io(struct bio *bio)
93dfe2ac | 68 | {
1b1f559f | 69 | struct f2fs_sb_info *sbi = bio->bi_private;
f568849e LT | 70 | struct bio_vec *bvec;
| 71 | int i;
93dfe2ac | 72 |
f568849e | 73 | bio_for_each_segment_all(bvec, bio, i) {
93dfe2ac JK | 74 | struct page *page = bvec->bv_page;
| 75 |
c1286ff4 | 76 | fscrypt_pullback_bio_page(&page, true);
4375a336 | 77 |
4246a0b6 | 78 | if (unlikely(bio->bi_error)) {
93dfe2ac | 79 | set_bit(AS_EIO, &page->mapping->flags);
c1286ff4 | 80 | f2fs_stop_checkpoint(sbi, true);
93dfe2ac JK | 81 | }
| 82 | end_page_writeback(page);
f568849e | 83 | }
c1286ff4 JK | 84 | if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
| 85 | wq_has_sleeper(&sbi->cp_wait))
93dfe2ac JK | 86 | wake_up(&sbi->cp_wait);
| 87 |
| 88 | bio_put(bio);
| 89 | }
| 90 |
940a6d34 GZ | 91 | /*
| 92 | * Low-level block read/write IO operations.
| 93 | */
| 94 | static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
| 95 | int npages, bool is_read)
| 96 | {
| 97 | struct bio *bio;
| 98 |
740432f8 | 99 | bio = f2fs_bio_alloc(npages);
940a6d34 GZ | 100 |
| 101 | bio->bi_bdev = sbi->sb->s_bdev;
55cf9cb6 | 102 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
940a6d34 | 103 | bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
12377024 | 104 | bio->bi_private = is_read ? NULL : sbi;
940a6d34 GZ | 105 |
| 106 | return bio;
| 107 | }
| 108 |
c1286ff4 JK | 109 | static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
| 110 | struct bio *bio, enum page_type type)
| 111 | {
| 112 | if (!is_read_io(rw)) {
| 113 | atomic_inc(&sbi->nr_wb_bios);
| 114 | if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
| 115 | current->plug && (type == DATA || type == NODE))
| 116 | blk_finish_plug(current->plug);
| 117 | }
| 118 | submit_bio(rw, bio);
| 119 | }
| 120 |
458e6197 | 121 | static void __submit_merged_bio(struct f2fs_bio_info *io) |
93dfe2ac | 122 | { |
458e6197 | 123 | struct f2fs_io_info *fio = &io->fio; |
93dfe2ac JK |
124 | |
125 | if (!io->bio) | |
126 | return; | |
127 | ||
6a8f8ca5 | 128 | if (is_read_io(fio->rw)) |
2ace38e0 | 129 | trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); |
6a8f8ca5 | 130 | else |
2ace38e0 | 131 | trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); |
940a6d34 | 132 | |
c1286ff4 | 133 | __submit_bio(io->sbi, fio->rw, io->bio, fio->type); |
93dfe2ac JK |
134 | io->bio = NULL; |
135 | } | |
136 | ||
c1286ff4 JK |
137 | static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, |
138 | struct page *page, nid_t ino) | |
139 | { | |
140 | struct bio_vec *bvec; | |
141 | struct page *target; | |
142 | int i; | |
143 | ||
144 | if (!io->bio) | |
145 | return false; | |
146 | ||
147 | if (!inode && !page && !ino) | |
148 | return true; | |
149 | ||
150 | bio_for_each_segment_all(bvec, io->bio, i) { | |
151 | ||
152 | if (bvec->bv_page->mapping) | |
153 | target = bvec->bv_page; | |
154 | else | |
155 | target = fscrypt_control_page(bvec->bv_page); | |
156 | ||
157 | if (inode && inode == target->mapping->host) | |
158 | return true; | |
159 | if (page && page == target) | |
160 | return true; | |
161 | if (ino && ino == ino_of_node(target)) | |
162 | return true; | |
163 | } | |
164 | ||
165 | return false; | |
166 | } | |
167 | ||
168 | static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode, | |
169 | struct page *page, nid_t ino, | |
170 | enum page_type type) | |
171 | { | |
172 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | |
173 | struct f2fs_bio_info *io = &sbi->write_io[btype]; | |
174 | bool ret; | |
175 | ||
176 | down_read(&io->io_rwsem); | |
177 | ret = __has_merged_page(io, inode, page, ino); | |
178 | up_read(&io->io_rwsem); | |
179 | return ret; | |
180 | } | |
181 | ||
182 | static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, | |
183 | struct inode *inode, struct page *page, | |
184 | nid_t ino, enum page_type type, int rw) | |
93dfe2ac JK |
185 | { |
186 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | |
187 | struct f2fs_bio_info *io; | |
188 | ||
189 | io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype]; | |
190 | ||
df0f8dc0 | 191 | down_write(&io->io_rwsem); |
458e6197 | 192 | |
c1286ff4 JK |
193 | if (!__has_merged_page(io, inode, page, ino)) |
194 | goto out; | |
195 | ||
458e6197 JK |
196 | /* change META to META_FLUSH in the checkpoint procedure */ |
197 | if (type >= META_FLUSH) { | |
198 | io->fio.type = META_FLUSH; | |
0f7b2abd JK |
199 | if (test_opt(sbi, NOBARRIER)) |
200 | io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO; | |
201 | else | |
202 | io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; | |
458e6197 JK |
203 | } |
204 | __submit_merged_bio(io); | |
c1286ff4 | 205 | out: |
df0f8dc0 | 206 | up_write(&io->io_rwsem); |
93dfe2ac JK |
207 | } |
208 | ||
c1286ff4 JK |
209 | void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type, |
210 | int rw) | |
211 | { | |
212 | __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw); | |
213 | } | |
214 | ||
215 | void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi, | |
216 | struct inode *inode, struct page *page, | |
217 | nid_t ino, enum page_type type, int rw) | |
218 | { | |
219 | if (has_merged_page(sbi, inode, page, ino, type)) | |
220 | __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw); | |
221 | } | |
222 | ||
223 | void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi) | |
224 | { | |
225 | f2fs_submit_merged_bio(sbi, DATA, WRITE); | |
226 | f2fs_submit_merged_bio(sbi, NODE, WRITE); | |
227 | f2fs_submit_merged_bio(sbi, META, WRITE); | |
228 | } | |
229 | ||
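Editor's note (not part of data.c): __submit_merged_bio() and the three f2fs_submit_merged_bio*() helpers above form f2fs's write-merging interface: each f2fs_bio_info keeps one partially filled bio per page type, and callers flush it explicitly when ordering matters. A minimal sketch of the flush-then-wait pattern follows; the function name example_sync_one_data_page is hypothetical, and the same pattern appears later in this excerpt in f2fs_write_data_page() under wbc->for_reclaim.

```c
/* Sketch only: push one DATA page to its end_io handler.
 * f2fs_submit_merged_bio_cond() submits the partially built DATA bio
 * only if @page is still queued in it; wait_on_page_writeback() then
 * blocks until f2fs_write_end_io() calls end_page_writeback().
 */
static void example_sync_one_data_page(struct f2fs_sb_info *sbi,
                                       struct page *page)
{
        f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
        wait_on_page_writeback(page);
}
```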
93dfe2ac JK |
230 | /* |
231 | * Fill the locked page with data located in the block address. | |
232 | * Return unlocked page. | |
233 | */ | |
05ca3632 | 234 | int f2fs_submit_page_bio(struct f2fs_io_info *fio) |
93dfe2ac | 235 | { |
93dfe2ac | 236 | struct bio *bio; |
c1286ff4 JK |
237 | struct page *page = fio->encrypted_page ? |
238 | fio->encrypted_page : fio->page; | |
93dfe2ac | 239 | |
2ace38e0 | 240 | trace_f2fs_submit_page_bio(page, fio); |
05ca3632 | 241 | f2fs_trace_ios(fio, 0); |
93dfe2ac JK |
242 | |
243 | /* Allocate a new bio */ | |
c1286ff4 | 244 | bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); |
93dfe2ac | 245 | |
c1286ff4 | 246 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { |
93dfe2ac | 247 | bio_put(bio); |
93dfe2ac JK |
248 | return -EFAULT; |
249 | } | |
250 | ||
c1286ff4 | 251 | __submit_bio(fio->sbi, fio->rw, bio, fio->type); |
93dfe2ac JK |
252 | return 0; |
253 | } | |
254 | ||
05ca3632 | 255 | void f2fs_submit_page_mbio(struct f2fs_io_info *fio) |
93dfe2ac | 256 | { |
05ca3632 | 257 | struct f2fs_sb_info *sbi = fio->sbi; |
458e6197 | 258 | enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); |
93dfe2ac | 259 | struct f2fs_bio_info *io; |
940a6d34 | 260 | bool is_read = is_read_io(fio->rw); |
4375a336 | 261 | struct page *bio_page; |
93dfe2ac | 262 | |
940a6d34 | 263 | io = is_read ? &sbi->read_io : &sbi->write_io[btype]; |
93dfe2ac | 264 | |
c1286ff4 JK |
265 | if (fio->old_blkaddr != NEW_ADDR) |
266 | verify_block_addr(sbi, fio->old_blkaddr); | |
267 | verify_block_addr(sbi, fio->new_blkaddr); | |
93dfe2ac | 268 | |
df0f8dc0 | 269 | down_write(&io->io_rwsem); |
93dfe2ac | 270 | |
c1286ff4 | 271 | if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || |
458e6197 JK |
272 | io->fio.rw != fio->rw)) |
273 | __submit_merged_bio(io); | |
93dfe2ac JK |
274 | alloc_new: |
275 | if (io->bio == NULL) { | |
90a893c7 | 276 | int bio_blocks = MAX_BIO_BLOCKS(sbi); |
940a6d34 | 277 | |
c1286ff4 JK |
278 | io->bio = __bio_alloc(sbi, fio->new_blkaddr, |
279 | bio_blocks, is_read); | |
458e6197 | 280 | io->fio = *fio; |
93dfe2ac JK |
281 | } |
282 | ||
4375a336 JK |
283 | bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; |
284 | ||
c1286ff4 JK |
285 | if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < |
286 | PAGE_SIZE) { | |
458e6197 | 287 | __submit_merged_bio(io); |
93dfe2ac JK |
288 | goto alloc_new; |
289 | } | |
290 | ||
c1286ff4 | 291 | io->last_block_in_bio = fio->new_blkaddr; |
05ca3632 | 292 | f2fs_trace_ios(fio, 0); |
93dfe2ac | 293 | |
df0f8dc0 | 294 | up_write(&io->io_rwsem); |
05ca3632 | 295 | trace_f2fs_submit_page_mbio(fio->page, fio); |
93dfe2ac JK |
296 | } |
297 | ||
c1286ff4 JK |
298 | static void __set_data_blkaddr(struct dnode_of_data *dn) |
299 | { | |
300 | struct f2fs_node *rn = F2FS_NODE(dn->node_page); | |
301 | __le32 *addr_array; | |
302 | ||
303 | /* Get physical address of data block */ | |
304 | addr_array = blkaddr_in_node(rn); | |
305 | addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); | |
306 | } | |
307 | ||
0a8165d7 | 308 | /* |
eb47b800 JK |
309 | * Lock ordering for the change of data block address: |
310 | * ->data_page | |
311 | * ->node_page | |
312 | * update block addresses in the node page | |
313 | */ | |
216a620a | 314 | void set_data_blkaddr(struct dnode_of_data *dn) |
eb47b800 | 315 | { |
c1286ff4 JK |
316 | f2fs_wait_on_page_writeback(dn->node_page, NODE, true); |
317 | __set_data_blkaddr(dn); | |
318 | if (set_page_dirty(dn->node_page)) | |
319 | dn->node_changed = true; | |
320 | } | |
eb47b800 | 321 | |
c1286ff4 JK |
322 | void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) |
323 | { | |
324 | dn->data_blkaddr = blkaddr; | |
325 | set_data_blkaddr(dn); | |
326 | f2fs_update_extent_cache(dn); | |
eb47b800 JK |
327 | } |
328 | ||
c1286ff4 JK |
329 | /* dn->ofs_in_node will be returned with up-to-date last block pointer */ |
330 | int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) | |
eb47b800 | 331 | { |
4081363f | 332 | struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); |
eb47b800 | 333 | |
c1286ff4 JK |
334 | if (!count) |
335 | return 0; | |
336 | ||
337 | if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) | |
eb47b800 | 338 | return -EPERM; |
c1286ff4 | 339 | if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count))) |
eb47b800 JK |
340 | return -ENOSPC; |
341 | ||
c1286ff4 JK |
342 | trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, |
343 | dn->ofs_in_node, count); | |
c01e2853 | 344 | |
c1286ff4 JK |
345 | f2fs_wait_on_page_writeback(dn->node_page, NODE, true); |
346 | ||
347 | for (; count > 0; dn->ofs_in_node++) { | |
348 | block_t blkaddr = | |
349 | datablock_addr(dn->node_page, dn->ofs_in_node); | |
350 | if (blkaddr == NULL_ADDR) { | |
351 | dn->data_blkaddr = NEW_ADDR; | |
352 | __set_data_blkaddr(dn); | |
353 | count--; | |
354 | } | |
355 | } | |
356 | ||
357 | if (set_page_dirty(dn->node_page)) | |
358 | dn->node_changed = true; | |
eb47b800 JK |
359 | return 0; |
360 | } | |
361 | ||
c1286ff4 JK |
362 | /* Should keep dn->ofs_in_node unchanged */ |
363 | int reserve_new_block(struct dnode_of_data *dn) | |
364 | { | |
365 | unsigned int ofs_in_node = dn->ofs_in_node; | |
366 | int ret; | |
367 | ||
368 | ret = reserve_new_blocks(dn, 1); | |
369 | dn->ofs_in_node = ofs_in_node; | |
370 | return ret; | |
371 | } | |
372 | ||
b600965c HL |
373 | int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) |
374 | { | |
375 | bool need_put = dn->inode_page ? false : true; | |
376 | int err; | |
377 | ||
378 | err = get_dnode_of_data(dn, index, ALLOC_NODE); | |
379 | if (err) | |
380 | return err; | |
a8865372 | 381 | |
b600965c HL |
382 | if (dn->data_blkaddr == NULL_ADDR) |
383 | err = reserve_new_block(dn); | |
a8865372 | 384 | if (err || need_put) |
b600965c HL |
385 | f2fs_put_dnode(dn); |
386 | return err; | |
387 | } | |
388 | ||
759af1c9 | 389 | int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) |
eb47b800 | 390 | { |
028a41e8 | 391 | struct extent_info ei; |
759af1c9 | 392 | struct inode *inode = dn->inode; |
028a41e8 | 393 | |
759af1c9 FL |
394 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { |
395 | dn->data_blkaddr = ei.blk + index - ei.fofs; | |
396 | return 0; | |
429511cd | 397 | } |
028a41e8 | 398 | |
759af1c9 | 399 | return f2fs_reserve_block(dn, index); |
eb47b800 JK |
400 | } |
401 | ||
a56c7c6f JK |
402 | struct page *get_read_data_page(struct inode *inode, pgoff_t index, |
403 | int rw, bool for_write) | |
eb47b800 | 404 | { |
eb47b800 JK |
405 | struct address_space *mapping = inode->i_mapping; |
406 | struct dnode_of_data dn; | |
407 | struct page *page; | |
cb3bc9ee | 408 | struct extent_info ei; |
eb47b800 | 409 | int err; |
cf04e8eb | 410 | struct f2fs_io_info fio = { |
05ca3632 | 411 | .sbi = F2FS_I_SB(inode), |
cf04e8eb | 412 | .type = DATA, |
43f3eae1 | 413 | .rw = rw, |
4375a336 | 414 | .encrypted_page = NULL, |
cf04e8eb | 415 | }; |
eb47b800 | 416 | |
4375a336 JK |
417 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) |
418 | return read_mapping_page(mapping, index, NULL); | |
419 | ||
a56c7c6f | 420 | page = f2fs_grab_cache_page(mapping, index, for_write); |
650495de JK |
421 | if (!page) |
422 | return ERR_PTR(-ENOMEM); | |
423 | ||
cb3bc9ee CY |
424 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { |
425 | dn.data_blkaddr = ei.blk + index - ei.fofs; | |
426 | goto got_it; | |
427 | } | |
428 | ||
eb47b800 | 429 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
266e97a8 | 430 | err = get_dnode_of_data(&dn, index, LOOKUP_NODE); |
86531d6b JK |
431 | if (err) |
432 | goto put_err; | |
eb47b800 JK |
433 | f2fs_put_dnode(&dn); |
434 | ||
6bacf52f | 435 | if (unlikely(dn.data_blkaddr == NULL_ADDR)) { |
86531d6b JK |
436 | err = -ENOENT; |
437 | goto put_err; | |
650495de | 438 | } |
cb3bc9ee | 439 | got_it: |
43f3eae1 JK |
440 | if (PageUptodate(page)) { |
441 | unlock_page(page); | |
eb47b800 | 442 | return page; |
43f3eae1 | 443 | } |
eb47b800 | 444 | |
d59ff4df JK | 445 | /*
446 | * A new dentry page is allocated but not able to be written, since its
447 | * new inode page couldn't be allocated due to -ENOSPC.
448 | * In such a case, its blkaddr may remain NEW_ADDR.
449 | * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
450 | */
451 | if (dn.data_blkaddr == NEW_ADDR) { | |
c1286ff4 JK |
452 | zero_user_segment(page, 0, PAGE_SIZE); |
453 | if (!PageUptodate(page)) | |
454 | SetPageUptodate(page); | |
43f3eae1 | 455 | unlock_page(page); |
d59ff4df JK |
456 | return page; |
457 | } | |
eb47b800 | 458 | |
c1286ff4 | 459 | fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; |
05ca3632 JK |
460 | fio.page = page; |
461 | err = f2fs_submit_page_bio(&fio); | |
393ff91f | 462 | if (err) |
86531d6b | 463 | goto put_err; |
43f3eae1 | 464 | return page; |
86531d6b JK |
465 | |
466 | put_err: | |
467 | f2fs_put_page(page, 1); | |
468 | return ERR_PTR(err); | |
43f3eae1 JK |
469 | } |
470 | ||
471 | struct page *find_data_page(struct inode *inode, pgoff_t index) | |
472 | { | |
473 | struct address_space *mapping = inode->i_mapping; | |
474 | struct page *page; | |
475 | ||
476 | page = find_get_page(mapping, index); | |
477 | if (page && PageUptodate(page)) | |
478 | return page; | |
479 | f2fs_put_page(page, 0); | |
480 | ||
a56c7c6f | 481 | page = get_read_data_page(inode, index, READ_SYNC, false); |
43f3eae1 JK |
482 | if (IS_ERR(page)) |
483 | return page; | |
484 | ||
485 | if (PageUptodate(page)) | |
486 | return page; | |
487 | ||
488 | wait_on_page_locked(page); | |
489 | if (unlikely(!PageUptodate(page))) { | |
490 | f2fs_put_page(page, 0); | |
491 | return ERR_PTR(-EIO); | |
492 | } | |
493 | return page; | |
494 | } | |
495 | ||
496 | /* | |
497 | * If it tries to access a hole, return an error. | |
498 | * This is because the callers (functions in dir.c and GC) should be able to know
499 | * whether this page exists or not. | |
500 | */ | |
a56c7c6f JK |
501 | struct page *get_lock_data_page(struct inode *inode, pgoff_t index, |
502 | bool for_write) | |
43f3eae1 JK |
503 | { |
504 | struct address_space *mapping = inode->i_mapping; | |
505 | struct page *page; | |
506 | repeat: | |
a56c7c6f | 507 | page = get_read_data_page(inode, index, READ_SYNC, for_write); |
43f3eae1 JK |
508 | if (IS_ERR(page)) |
509 | return page; | |
393ff91f | 510 | |
43f3eae1 | 511 | /* wait for read completion */ |
393ff91f | 512 | lock_page(page); |
6bacf52f | 513 | if (unlikely(page->mapping != mapping)) { |
afcb7ca0 JK |
514 | f2fs_put_page(page, 1); |
515 | goto repeat; | |
eb47b800 | 516 | } |
c1286ff4 JK |
517 | if (unlikely(!PageUptodate(page))) { |
518 | f2fs_put_page(page, 1); | |
519 | return ERR_PTR(-EIO); | |
520 | } | |
eb47b800 JK |
521 | return page; |
522 | } | |
523 | ||
0a8165d7 | 524 | /*
eb47b800 JK | 525 | * Caller ensures that this data page is never allocated.
526 | * A new zero-filled data page is allocated in the page cache.
39936837 | 527 | *
4f4124d0 CY | 528 | * Also, the caller should grab and release a rwsem by calling f2fs_lock_op() and
529 | * f2fs_unlock_op().
470f00e9 CY | 530 | * Note that ipage is set only by make_empty_dir, and if any error occurs,
531 | * ipage should be released by this function.
eb47b800 | 532 | */
64aa7ed9 | 533 | struct page *get_new_data_page(struct inode *inode, |
a8865372 | 534 | struct page *ipage, pgoff_t index, bool new_i_size) |
eb47b800 | 535 | { |
eb47b800 JK |
536 | struct address_space *mapping = inode->i_mapping; |
537 | struct page *page; | |
538 | struct dnode_of_data dn; | |
539 | int err; | |
c1286ff4 | 540 | |
a56c7c6f | 541 | page = f2fs_grab_cache_page(mapping, index, true); |
470f00e9 CY |
542 | if (!page) { |
543 | /* | |
544 | * before exiting, we should make sure ipage will be released | |
545 | * if any error occurs.
546 | */ | |
547 | f2fs_put_page(ipage, 1); | |
01f28610 | 548 | return ERR_PTR(-ENOMEM); |
470f00e9 | 549 | } |
eb47b800 | 550 | |
a8865372 | 551 | set_new_dnode(&dn, inode, ipage, NULL, 0); |
b600965c | 552 | err = f2fs_reserve_block(&dn, index); |
01f28610 JK |
553 | if (err) { |
554 | f2fs_put_page(page, 1); | |
eb47b800 | 555 | return ERR_PTR(err); |
a8865372 | 556 | } |
01f28610 JK |
557 | if (!ipage) |
558 | f2fs_put_dnode(&dn); | |
eb47b800 JK |
559 | |
560 | if (PageUptodate(page)) | |
01f28610 | 561 | goto got_it; |
eb47b800 JK |
562 | |
563 | if (dn.data_blkaddr == NEW_ADDR) { | |
c1286ff4 JK |
564 | zero_user_segment(page, 0, PAGE_SIZE); |
565 | if (!PageUptodate(page)) | |
566 | SetPageUptodate(page); | |
eb47b800 | 567 | } else { |
4375a336 | 568 | f2fs_put_page(page, 1); |
a8865372 | 569 | |
c1286ff4 JK |
570 | /* if ipage exists, blkaddr should be NEW_ADDR */ |
571 | f2fs_bug_on(F2FS_I_SB(inode), ipage); | |
572 | page = get_lock_data_page(inode, index, true); | |
4375a336 | 573 | if (IS_ERR(page)) |
c1286ff4 | 574 | return page; |
eb47b800 | 575 | } |
01f28610 | 576 | got_it: |
9edcdabf | 577 | if (new_i_size && i_size_read(inode) < |
c1286ff4 JK |
578 | ((loff_t)(index + 1) << PAGE_SHIFT)) |
579 | f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); | |
eb47b800 JK |
580 | return page; |
581 | } | |
582 | ||
bfad7c2d JK |
583 | static int __allocate_data_block(struct dnode_of_data *dn) |
584 | { | |
4081363f | 585 | struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); |
bfad7c2d | 586 | struct f2fs_summary sum; |
bfad7c2d | 587 | struct node_info ni; |
38aa0889 | 588 | int seg = CURSEG_WARM_DATA; |
976e4c50 | 589 | pgoff_t fofs; |
c1286ff4 | 590 | blkcnt_t count = 1; |
bfad7c2d | 591 | |
c1286ff4 | 592 | if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) |
bfad7c2d | 593 | return -EPERM; |
df6136ef CY |
594 | |
595 | dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); | |
596 | if (dn->data_blkaddr == NEW_ADDR) | |
597 | goto alloc; | |
598 | ||
c1286ff4 | 599 | if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count))) |
bfad7c2d JK |
600 | return -ENOSPC; |
601 | ||
df6136ef | 602 | alloc: |
bfad7c2d JK |
603 | get_node_info(sbi, dn->nid, &ni); |
604 | set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); | |
605 | ||
38aa0889 JK |
606 | if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page) |
607 | seg = CURSEG_DIRECT_IO; | |
608 | ||
df6136ef CY |
609 | allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr, |
610 | &sum, seg); | |
216a620a | 611 | set_data_blkaddr(dn); |
bfad7c2d | 612 | |
976e4c50 | 613 | /* update i_size */ |
c1286ff4 | 614 | fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + |
976e4c50 | 615 | dn->ofs_in_node; |
c1286ff4 JK |
616 | if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT)) |
617 | f2fs_i_size_write(dn->inode, | |
618 | ((loff_t)(fofs + 1) << PAGE_SHIFT)); | |
bfad7c2d JK |
619 | return 0; |
620 | } | |
621 | ||
c1286ff4 | 622 | ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) |
59b802e5 | 623 | { |
c1286ff4 JK |
624 | struct inode *inode = file_inode(iocb->ki_filp); |
625 | struct f2fs_map_blocks map; | |
626 | ssize_t ret = 0; | |
f9811703 | 627 | |
c1286ff4 JK |
628 | map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); |
629 | map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); | |
630 | if (map.m_len > map.m_lblk) | |
631 | map.m_len -= map.m_lblk; | |
632 | else | |
633 | map.m_len = 0; | |
59b802e5 | 634 | |
c1286ff4 | 635 | map.m_next_pgofs = NULL; |
59b802e5 | 636 | |
c1286ff4 JK |
637 | if (iocb->ki_flags & IOCB_DIRECT) { |
638 | ret = f2fs_convert_inline_inode(inode); | |
639 | if (ret) | |
640 | return ret; | |
641 | return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); | |
59b802e5 | 642 | } |
c1286ff4 JK |
643 | if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) { |
644 | ret = f2fs_convert_inline_inode(inode); | |
645 | if (ret) | |
646 | return ret; | |
647 | } | |
648 | if (!f2fs_has_inline_data(inode)) | |
649 | return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); | |
650 | return ret; | |
59b802e5 JK |
651 | } |
652 | ||
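Editor's note (not part of data.c): f2fs_preallocate_blocks() above converts the byte range of the incoming write into a block range before handing it to f2fs_map_blocks(). Assuming the usual 4 KB f2fs block size and the header definitions in which F2FS_BLK_ALIGN() rounds a byte offset up to a block index while F2FS_BYTES_TO_BLK() rounds down (both live in the f2fs headers, not in this file), a 10,240-byte write at ki_pos = 6,144 gives m_lblk = F2FS_BLK_ALIGN(6144) = 2 and m_len = F2FS_BYTES_TO_BLK(16384) - 2 = 2: only the two blocks fully covered by the write are preallocated, and the partially written first block is left to the normal allocation path.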
0a8165d7 | 653 | /*
003a3e1d JK | 654 | * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
655 | * f2fs_map_blocks structure.
4f4124d0 CY | 656 | * If original data blocks are allocated, then give them to blockdev.
657 | * Otherwise,
658 | * a. preallocate requested block addresses
659 | * b. do not use extent cache for better performance
660 | * c. give the block addresses to blockdev
eb47b800 | 661 | */
c1286ff4 | 662 | int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, |
e2b4e2bc | 663 | int create, int flag) |
eb47b800 | 664 | { |
003a3e1d | 665 | unsigned int maxblocks = map->m_len; |
eb47b800 | 666 | struct dnode_of_data dn; |
f9811703 | 667 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
c1286ff4 JK |
668 | int mode = create ? ALLOC_NODE : LOOKUP_NODE; |
669 | pgoff_t pgofs, end_offset, end; | |
bfad7c2d | 670 | int err = 0, ofs = 1; |
c1286ff4 JK |
671 | unsigned int ofs_in_node, last_ofs_in_node; |
672 | blkcnt_t prealloc; | |
a2e7d1bf | 673 | struct extent_info ei; |
c1286ff4 JK |
674 | block_t blkaddr; |
675 | ||
676 | if (!maxblocks) | |
677 | return 0; | |
eb47b800 | 678 | |
003a3e1d JK |
679 | map->m_len = 0; |
680 | map->m_flags = 0; | |
681 | ||
682 | /* it only supports block size == page size */ | |
683 | pgofs = (pgoff_t)map->m_lblk; | |
c1286ff4 | 684 | end = pgofs + maxblocks; |
eb47b800 | 685 | |
c1286ff4 | 686 | if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { |
003a3e1d JK |
687 | map->m_pblk = ei.blk + pgofs - ei.fofs; |
688 | map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); | |
689 | map->m_flags = F2FS_MAP_MAPPED; | |
bfad7c2d | 690 | goto out; |
a2e7d1bf | 691 | } |
bfad7c2d | 692 | |
c1286ff4 | 693 | next_dnode: |
59b802e5 | 694 | if (create) |
c1286ff4 | 695 | f2fs_lock_op(sbi); |
eb47b800 JK |
696 | |
697 | /* When reading holes, we need its node page */ | |
698 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
bfad7c2d | 699 | err = get_dnode_of_data(&dn, pgofs, mode); |
1ec79083 | 700 | if (err) { |
c1286ff4 JK |
701 | if (flag == F2FS_GET_BLOCK_BMAP) |
702 | map->m_pblk = 0; | |
703 | if (err == -ENOENT) { | |
bfad7c2d | 704 | err = 0; |
c1286ff4 JK |
705 | if (map->m_next_pgofs) |
706 | *map->m_next_pgofs = | |
707 | get_next_page_offset(&dn, pgofs); | |
708 | } | |
bfad7c2d | 709 | goto unlock_out; |
848753aa | 710 | } |
973163fc | 711 | |
c1286ff4 JK |
712 | prealloc = 0; |
713 | ofs_in_node = dn.ofs_in_node; | |
714 | end_offset = ADDRS_PER_PAGE(dn.node_page, inode); | |
715 | ||
716 | next_block: | |
717 | blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); | |
718 | ||
719 | if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) { | |
973163fc | 720 | if (create) { |
f9811703 CY |
721 | if (unlikely(f2fs_cp_error(sbi))) { |
722 | err = -EIO; | |
c1286ff4 JK |
723 | goto sync_out; |
724 | } | |
725 | if (flag == F2FS_GET_BLOCK_PRE_AIO) { | |
726 | if (blkaddr == NULL_ADDR) { | |
727 | prealloc++; | |
728 | last_ofs_in_node = dn.ofs_in_node; | |
729 | } | |
730 | } else { | |
731 | err = __allocate_data_block(&dn); | |
332f40b4 | 732 | if (!err) |
c1286ff4 | 733 | set_inode_flag(inode, FI_APPEND_WRITE); |
f9811703 | 734 | } |
973163fc | 735 | if (err) |
c1286ff4 | 736 | goto sync_out; |
973163fc | 737 | map->m_flags = F2FS_MAP_NEW; |
c1286ff4 | 738 | blkaddr = dn.data_blkaddr; |
973163fc | 739 | } else { |
c1286ff4 JK |
740 | if (flag == F2FS_GET_BLOCK_BMAP) { |
741 | map->m_pblk = 0; | |
742 | goto sync_out; | |
973163fc | 743 | } |
c1286ff4 JK |
744 | if (flag == F2FS_GET_BLOCK_FIEMAP && |
745 | blkaddr == NULL_ADDR) { | |
746 | if (map->m_next_pgofs) | |
747 | *map->m_next_pgofs = pgofs + 1; | |
748 | } | |
749 | if (flag != F2FS_GET_BLOCK_FIEMAP || | |
750 | blkaddr != NEW_ADDR) | |
751 | goto sync_out; | |
e2b4e2bc | 752 | } |
e2b4e2bc | 753 | } |
eb47b800 | 754 | |
c1286ff4 JK |
755 | if (flag == F2FS_GET_BLOCK_PRE_AIO) |
756 | goto skip; | |
757 | ||
758 | if (map->m_len == 0) { | |
759 | /* preallocated unwritten block should be mapped for fiemap. */ | |
760 | if (blkaddr == NEW_ADDR) | |
761 | map->m_flags |= F2FS_MAP_UNWRITTEN; | |
762 | map->m_flags |= F2FS_MAP_MAPPED; | |
763 | ||
764 | map->m_pblk = blkaddr; | |
765 | map->m_len = 1; | |
766 | } else if ((map->m_pblk != NEW_ADDR && | |
767 | blkaddr == (map->m_pblk + ofs)) || | |
768 | (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || | |
769 | flag == F2FS_GET_BLOCK_PRE_DIO) { | |
770 | ofs++; | |
771 | map->m_len++; | |
772 | } else { | |
773 | goto sync_out; | |
774 | } | |
bfad7c2d | 775 | |
c1286ff4 | 776 | skip: |
bfad7c2d JK |
777 | dn.ofs_in_node++; |
778 | pgofs++; | |
779 | ||
c1286ff4 JK |
780 | /* preallocate blocks in batch for one dnode page */ |
781 | if (flag == F2FS_GET_BLOCK_PRE_AIO && | |
782 | (pgofs == end || dn.ofs_in_node == end_offset)) { | |
bfad7c2d | 783 | |
c1286ff4 JK |
784 | dn.ofs_in_node = ofs_in_node; |
785 | err = reserve_new_blocks(&dn, prealloc); | |
786 | if (err) | |
787 | goto sync_out; | |
e2b4e2bc | 788 | |
c1286ff4 JK |
789 | map->m_len += dn.ofs_in_node - ofs_in_node; |
790 | if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { | |
791 | err = -ENOSPC; | |
792 | goto sync_out; | |
793 | } | |
794 | dn.ofs_in_node = end_offset; | |
bfad7c2d | 795 | } |
eb47b800 | 796 | |
c1286ff4 JK |
797 | if (pgofs >= end) |
798 | goto sync_out; | |
799 | else if (dn.ofs_in_node < end_offset) | |
800 | goto next_block; | |
973163fc | 801 | |
c1286ff4 | 802 | f2fs_put_dnode(&dn); |
973163fc | 803 | |
c1286ff4 JK |
804 | if (create) { |
805 | f2fs_unlock_op(sbi); | |
332f40b4 | 806 | f2fs_balance_fs(sbi, dn.node_changed); |
eb47b800 | 807 | } |
c1286ff4 JK |
808 | goto next_dnode; |
809 | ||
bfad7c2d | 810 | sync_out: |
eb47b800 | 811 | f2fs_put_dnode(&dn); |
bfad7c2d | 812 | unlock_out: |
c1286ff4 JK |
813 | if (create) { |
814 | f2fs_unlock_op(sbi); | |
332f40b4 | 815 | f2fs_balance_fs(sbi, dn.node_changed); |
c1286ff4 | 816 | } |
bfad7c2d | 817 | out: |
003a3e1d | 818 | trace_f2fs_map_blocks(inode, map, err); |
bfad7c2d | 819 | return err; |
eb47b800 JK |
820 | } |
821 | ||
003a3e1d | 822 | static int __get_data_block(struct inode *inode, sector_t iblock, |
c1286ff4 JK |
823 | struct buffer_head *bh, int create, int flag, |
824 | pgoff_t *next_pgofs) | |
003a3e1d JK |
825 | { |
826 | struct f2fs_map_blocks map; | |
827 | int ret; | |
828 | ||
829 | map.m_lblk = iblock; | |
830 | map.m_len = bh->b_size >> inode->i_blkbits; | |
c1286ff4 | 831 | map.m_next_pgofs = next_pgofs; |
003a3e1d | 832 | |
e2b4e2bc | 833 | ret = f2fs_map_blocks(inode, &map, create, flag); |
003a3e1d JK |
834 | if (!ret) { |
835 | map_bh(bh, inode->i_sb, map.m_pblk); | |
836 | bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; | |
837 | bh->b_size = map.m_len << inode->i_blkbits; | |
838 | } | |
839 | return ret; | |
840 | } | |
841 | ||
ccfb3000 | 842 | static int get_data_block(struct inode *inode, sector_t iblock, |
c1286ff4 JK |
843 | struct buffer_head *bh_result, int create, int flag, |
844 | pgoff_t *next_pgofs) | |
e2b4e2bc | 845 | { |
c1286ff4 JK |
846 | return __get_data_block(inode, iblock, bh_result, create, |
847 | flag, next_pgofs); | |
e2b4e2bc CY |
848 | } |
849 | ||
850 | static int get_data_block_dio(struct inode *inode, sector_t iblock, | |
ccfb3000 JK |
851 | struct buffer_head *bh_result, int create) |
852 | { | |
e2b4e2bc | 853 | return __get_data_block(inode, iblock, bh_result, create, |
c1286ff4 | 854 | F2FS_GET_BLOCK_DIO, NULL); |
ccfb3000 JK |
855 | } |
856 | ||
e2b4e2bc | 857 | static int get_data_block_bmap(struct inode *inode, sector_t iblock, |
ccfb3000 JK |
858 | struct buffer_head *bh_result, int create) |
859 | { | |
c1286ff4 JK |
860 | /* Block number less than F2FS MAX BLOCKS */ |
861 | if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks)) | |
862 | return -EFBIG; | |
863 | ||
e2b4e2bc | 864 | return __get_data_block(inode, iblock, bh_result, create, |
c1286ff4 | 865 | F2FS_GET_BLOCK_BMAP, NULL); |
ccfb3000 JK |
866 | } |
867 | ||
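Editor's note (not part of data.c): __get_data_block() and its get_data_block_dio/get_data_block_bmap wrappers adapt f2fs_map_blocks() to the buffer_head-based get_block_t interface that the generic VFS/block helpers expect. A minimal sketch of how such a wrapper is consumed, assuming the standard generic_block_bmap() helper from fs/buffer.c; the function name example_bmap is hypothetical (f2fs's real bmap hook lives further down in data.c, outside this excerpt).

```c
/* Sketch only: a get_block_t callback such as get_data_block_bmap() is
 * driven by generic helpers.  generic_block_bmap() builds a temporary
 * buffer_head, calls the callback with create == 0, and returns the
 * mapped block number (0 for a hole).
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, get_data_block_bmap);
}
```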
7f63eb77 JK |
868 | static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) |
869 | { | |
870 | return (offset >> inode->i_blkbits); | |
871 | } | |
872 | ||
873 | static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) | |
874 | { | |
875 | return (blk << inode->i_blkbits); | |
876 | } | |
877 | ||
9ab70134 JK |
878 | int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
879 | u64 start, u64 len) | |
880 | { | |
7f63eb77 JK |
881 | struct buffer_head map_bh; |
882 | sector_t start_blk, last_blk; | |
c1286ff4 | 883 | pgoff_t next_pgofs; |
7f63eb77 JK |
884 | u64 logical = 0, phys = 0, size = 0; |
885 | u32 flags = 0; | |
7f63eb77 JK |
886 | int ret = 0; |
887 | ||
888 | ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); | |
889 | if (ret) | |
890 | return ret; | |
891 | ||
67f8cf3c JK |
892 | if (f2fs_has_inline_data(inode)) { |
893 | ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); | |
894 | if (ret != -EAGAIN) | |
895 | return ret; | |
896 | } | |
897 | ||
c1286ff4 | 898 | inode_lock(inode); |
7f63eb77 | 899 | |
7f63eb77 JK |
900 | if (logical_to_blk(inode, len) == 0) |
901 | len = blk_to_logical(inode, 1); | |
902 | ||
903 | start_blk = logical_to_blk(inode, start); | |
904 | last_blk = logical_to_blk(inode, start + len - 1); | |
c1286ff4 | 905 | |
7f63eb77 JK |
906 | next: |
907 | memset(&map_bh, 0, sizeof(struct buffer_head)); | |
908 | map_bh.b_size = len; | |
909 | ||
e2b4e2bc | 910 | ret = get_data_block(inode, start_blk, &map_bh, 0, |
c1286ff4 | 911 | F2FS_GET_BLOCK_FIEMAP, &next_pgofs); |
7f63eb77 JK |
912 | if (ret) |
913 | goto out; | |
914 | ||
915 | /* HOLE */ | |
916 | if (!buffer_mapped(&map_bh)) { | |
c1286ff4 | 917 | start_blk = next_pgofs; |
9e3d0bf6 CY |
918 | |
919 | if (blk_to_logical(inode, start_blk) < blk_to_logical(inode, | |
920 | F2FS_I_SB(inode)->max_file_blocks)) | |
c1286ff4 | 921 | goto prep_next; |
9e3d0bf6 | 922 | |
c1286ff4 JK |
923 | flags |= FIEMAP_EXTENT_LAST; |
924 | } | |
7f63eb77 | 925 | |
c1286ff4 JK |
926 | if (size) { |
927 | if (f2fs_encrypted_inode(inode)) | |
928 | flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; | |
7f63eb77 | 929 | |
c1286ff4 JK |
930 | ret = fiemap_fill_next_extent(fieinfo, logical, |
931 | phys, size, flags); | |
932 | } | |
7f63eb77 | 933 | |
c1286ff4 JK |
934 | if (start_blk > last_blk || ret) |
935 | goto out; | |
7f63eb77 | 936 | |
c1286ff4 JK |
937 | logical = blk_to_logical(inode, start_blk); |
938 | phys = blk_to_logical(inode, map_bh.b_blocknr); | |
939 | size = map_bh.b_size; | |
940 | flags = 0; | |
941 | if (buffer_unwritten(&map_bh)) | |
942 | flags = FIEMAP_EXTENT_UNWRITTEN; | |
7f63eb77 | 943 | |
c1286ff4 JK |
944 | start_blk += logical_to_blk(inode, size); |
945 | ||
946 | prep_next: | |
7f63eb77 JK |
947 | cond_resched(); |
948 | if (fatal_signal_pending(current)) | |
949 | ret = -EINTR; | |
950 | else | |
951 | goto next; | |
952 | out: | |
953 | if (ret == 1) | |
954 | ret = 0; | |
955 | ||
c1286ff4 | 956 | inode_unlock(inode); |
7f63eb77 | 957 | return ret; |
9ab70134 JK |
958 | } |
959 | ||
c1286ff4 JK |
960 | static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr, |
961 | unsigned nr_pages) | |
962 | { | |
963 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
964 | struct fscrypt_ctx *ctx = NULL; | |
965 | struct block_device *bdev = sbi->sb->s_bdev; | |
966 | struct bio *bio; | |
967 | ||
968 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { | |
969 | ctx = fscrypt_get_ctx(inode, GFP_NOFS); | |
970 | if (IS_ERR(ctx)) | |
971 | return ERR_CAST(ctx); | |
972 | ||
973 | /* wait the page to be moved by cleaning */ | |
974 | f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); | |
975 | } | |
976 | ||
977 | bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES)); | |
978 | if (!bio) { | |
979 | if (ctx) | |
980 | fscrypt_release_ctx(ctx); | |
981 | return ERR_PTR(-ENOMEM); | |
982 | } | |
983 | bio->bi_bdev = bdev; | |
984 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr); | |
985 | bio->bi_end_io = f2fs_read_end_io; | |
986 | bio->bi_private = ctx; | |
987 | ||
988 | return bio; | |
989 | } | |
990 | ||
f1e88660 JK | 991 | /*
992 | * This function was originally taken from fs/mpage.c, and customized for f2fs.
993 | * The major change is that it assumes block_size == page_size, the f2fs default.
994 | */
995 | static int f2fs_mpage_readpages(struct address_space *mapping, | |
996 | struct list_head *pages, struct page *page, | |
997 | unsigned nr_pages) | |
998 | { | |
999 | struct bio *bio = NULL; | |
1000 | unsigned page_idx; | |
1001 | sector_t last_block_in_bio = 0; | |
1002 | struct inode *inode = mapping->host; | |
1003 | const unsigned blkbits = inode->i_blkbits; | |
1004 | const unsigned blocksize = 1 << blkbits; | |
1005 | sector_t block_in_file; | |
1006 | sector_t last_block; | |
1007 | sector_t last_block_in_file; | |
1008 | sector_t block_nr; | |
f1e88660 JK |
1009 | struct f2fs_map_blocks map; |
1010 | ||
1011 | map.m_pblk = 0; | |
1012 | map.m_lblk = 0; | |
1013 | map.m_len = 0; | |
1014 | map.m_flags = 0; | |
c1286ff4 | 1015 | map.m_next_pgofs = NULL; |
f1e88660 JK |
1016 | |
1017 | for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { | |
1018 | ||
1019 | prefetchw(&page->flags); | |
1020 | if (pages) { | |
1021 | page = list_entry(pages->prev, struct page, lru); | |
1022 | list_del(&page->lru); | |
1023 | if (add_to_page_cache_lru(page, mapping, | |
1024 | page->index, GFP_KERNEL)) | |
1025 | goto next_page; | |
1026 | } | |
1027 | ||
1028 | block_in_file = (sector_t)page->index; | |
1029 | last_block = block_in_file + nr_pages; | |
1030 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> | |
1031 | blkbits; | |
1032 | if (last_block > last_block_in_file) | |
1033 | last_block = last_block_in_file; | |
1034 | ||
1035 | /* | |
1036 | * Map blocks using the previous result first. | |
1037 | */ | |
1038 | if ((map.m_flags & F2FS_MAP_MAPPED) && | |
1039 | block_in_file > map.m_lblk && | |
1040 | block_in_file < (map.m_lblk + map.m_len)) | |
1041 | goto got_it; | |
1042 | ||
1043 | /* | |
1044 | * Then do more f2fs_map_blocks() calls until we are | |
1045 | * done with this page. | |
1046 | */ | |
1047 | map.m_flags = 0; | |
1048 | ||
1049 | if (block_in_file < last_block) { | |
1050 | map.m_lblk = block_in_file; | |
1051 | map.m_len = last_block - block_in_file; | |
1052 | ||
46c9e141 | 1053 | if (f2fs_map_blocks(inode, &map, 0, |
c1286ff4 | 1054 | F2FS_GET_BLOCK_READ)) |
f1e88660 JK |
1055 | goto set_error_page; |
1056 | } | |
1057 | got_it: | |
1058 | if ((map.m_flags & F2FS_MAP_MAPPED)) { | |
1059 | block_nr = map.m_pblk + block_in_file - map.m_lblk; | |
1060 | SetPageMappedToDisk(page); | |
1061 | ||
1062 | if (!PageUptodate(page) && !cleancache_get_page(page)) { | |
1063 | SetPageUptodate(page); | |
1064 | goto confused; | |
1065 | } | |
1066 | } else { | |
c1286ff4 JK |
1067 | zero_user_segment(page, 0, PAGE_SIZE); |
1068 | if (!PageUptodate(page)) | |
1069 | SetPageUptodate(page); | |
f1e88660 JK |
1070 | unlock_page(page); |
1071 | goto next_page; | |
1072 | } | |
1073 | ||
1074 | /* | |
1075 | * This page will go to BIO. Do we need to send this | |
1076 | * BIO off first? | |
1077 | */ | |
1078 | if (bio && (last_block_in_bio != block_nr - 1)) { | |
1079 | submit_and_realloc: | |
c1286ff4 | 1080 | __submit_bio(F2FS_I_SB(inode), READ, bio, DATA); |
f1e88660 JK |
1081 | bio = NULL; |
1082 | } | |
1083 | if (bio == NULL) { | |
c1286ff4 JK |
1084 | bio = f2fs_grab_bio(inode, block_nr, nr_pages); |
1085 | if (IS_ERR(bio)) { | |
1086 | bio = NULL; | |
f1e88660 | 1087 | goto set_error_page; |
4375a336 | 1088 | } |
f1e88660 JK |
1089 | } |
1090 | ||
1091 | if (bio_add_page(bio, page, blocksize, 0) < blocksize) | |
1092 | goto submit_and_realloc; | |
1093 | ||
1094 | last_block_in_bio = block_nr; | |
1095 | goto next_page; | |
1096 | set_error_page: | |
1097 | SetPageError(page); | |
c1286ff4 | 1098 | zero_user_segment(page, 0, PAGE_SIZE); |
f1e88660 JK |
1099 | unlock_page(page); |
1100 | goto next_page; | |
1101 | confused: | |
1102 | if (bio) { | |
c1286ff4 | 1103 | __submit_bio(F2FS_I_SB(inode), READ, bio, DATA); |
f1e88660 JK |
1104 | bio = NULL; |
1105 | } | |
1106 | unlock_page(page); | |
1107 | next_page: | |
1108 | if (pages) | |
c1286ff4 | 1109 | put_page(page); |
f1e88660 JK |
1110 | } |
1111 | BUG_ON(pages && !list_empty(pages)); | |
1112 | if (bio) | |
c1286ff4 | 1113 | __submit_bio(F2FS_I_SB(inode), READ, bio, DATA); |
f1e88660 JK |
1114 | return 0; |
1115 | } | |
1116 | ||
eb47b800 JK |
1117 | static int f2fs_read_data_page(struct file *file, struct page *page) |
1118 | { | |
9ffe0fb5 | 1119 | struct inode *inode = page->mapping->host; |
b3d208f9 | 1120 | int ret = -EAGAIN; |
9ffe0fb5 | 1121 | |
c20e89cd CY |
1122 | trace_f2fs_readpage(page, DATA); |
1123 | ||
e1c42045 | 1124 | /* If the file has inline data, try to read it directly */ |
9ffe0fb5 HL |
1125 | if (f2fs_has_inline_data(inode)) |
1126 | ret = f2fs_read_inline_data(inode, page); | |
b3d208f9 | 1127 | if (ret == -EAGAIN) |
f1e88660 | 1128 | ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1); |
9ffe0fb5 | 1129 | return ret; |
eb47b800 JK |
1130 | } |
1131 | ||
1132 | static int f2fs_read_data_pages(struct file *file, | |
1133 | struct address_space *mapping, | |
1134 | struct list_head *pages, unsigned nr_pages) | |
1135 | { | |
9ffe0fb5 | 1136 | struct inode *inode = file->f_mapping->host; |
b8c29400 CY |
1137 | struct page *page = list_entry(pages->prev, struct page, lru); |
1138 | ||
1139 | trace_f2fs_readpages(inode, page, nr_pages); | |
9ffe0fb5 HL |
1140 | |
1141 | /* If the file has inline data, skip readpages */ | |
1142 | if (f2fs_has_inline_data(inode)) | |
1143 | return 0; | |
1144 | ||
f1e88660 | 1145 | return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages); |
eb47b800 JK |
1146 | } |
1147 | ||
05ca3632 | 1148 | int do_write_data_page(struct f2fs_io_info *fio) |
eb47b800 | 1149 | { |
05ca3632 | 1150 | struct page *page = fio->page; |
eb47b800 | 1151 | struct inode *inode = page->mapping->host; |
eb47b800 JK |
1152 | struct dnode_of_data dn; |
1153 | int err = 0; | |
1154 | ||
1155 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
266e97a8 | 1156 | err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE); |
eb47b800 JK |
1157 | if (err) |
1158 | return err; | |
1159 | ||
c1286ff4 | 1160 | fio->old_blkaddr = dn.data_blkaddr; |
eb47b800 JK |
1161 | |
1162 | /* This page is already truncated */ | |
c1286ff4 | 1163 | if (fio->old_blkaddr == NULL_ADDR) { |
2bca1e23 | 1164 | ClearPageUptodate(page); |
eb47b800 | 1165 | goto out_writepage; |
2bca1e23 | 1166 | } |
eb47b800 | 1167 | |
4375a336 | 1168 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { |
c1286ff4 | 1169 | gfp_t gfp_flags = GFP_NOFS; |
08b39fbd CY |
1170 | |
1171 | /* wait for GCed encrypted page writeback */ | |
1172 | f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode), | |
c1286ff4 JK |
1173 | fio->old_blkaddr); |
1174 | retry_encrypt: | |
1175 | fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page, | |
1176 | gfp_flags); | |
4375a336 JK |
1177 | if (IS_ERR(fio->encrypted_page)) { |
1178 | err = PTR_ERR(fio->encrypted_page); | |
c1286ff4 JK |
1179 | if (err == -ENOMEM) { |
1180 | /* flush pending ios and wait for a while */ | |
1181 | f2fs_flush_merged_bios(F2FS_I_SB(inode)); | |
1182 | congestion_wait(BLK_RW_ASYNC, HZ/50); | |
1183 | gfp_flags |= __GFP_NOFAIL; | |
1184 | err = 0; | |
1185 | goto retry_encrypt; | |
1186 | } | |
4375a336 JK |
1187 | goto out_writepage; |
1188 | } | |
1189 | } | |
1190 | ||
eb47b800 JK |
1191 | set_page_writeback(page); |
1192 | ||
1193 | /*
1194 | * If the current allocation needs SSR,
1195 | * it is better to do in-place writes for the updated data.
1196 | */
c1286ff4 | 1197 | if (unlikely(fio->old_blkaddr != NEW_ADDR && |
b25958b6 | 1198 | !is_cold_data(page) && |
c1286ff4 | 1199 | !IS_ATOMIC_WRITTEN_PAGE(page) && |
b25958b6 | 1200 | need_inplace_update(inode))) { |
05ca3632 | 1201 | rewrite_data_page(fio); |
c1286ff4 | 1202 | set_inode_flag(inode, FI_UPDATE_WRITE); |
8ce67cb0 | 1203 | trace_f2fs_do_write_data_page(page, IPU); |
eb47b800 | 1204 | } else { |
05ca3632 | 1205 | write_data_page(&dn, fio); |
8ce67cb0 | 1206 | trace_f2fs_do_write_data_page(page, OPU); |
c1286ff4 | 1207 | set_inode_flag(inode, FI_APPEND_WRITE); |
3c6c2beb | 1208 | if (page->index == 0) |
c1286ff4 | 1209 | set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); |
eb47b800 JK |
1210 | } |
1211 | out_writepage: | |
1212 | f2fs_put_dnode(&dn); | |
1213 | return err; | |
1214 | } | |
1215 | ||
1216 | static int f2fs_write_data_page(struct page *page, | |
1217 | struct writeback_control *wbc) | |
1218 | { | |
1219 | struct inode *inode = page->mapping->host; | |
4081363f | 1220 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
eb47b800 JK |
1221 | loff_t i_size = i_size_read(inode); |
1222 | const pgoff_t end_index = ((unsigned long long) i_size) | |
c1286ff4 JK |
1223 | >> PAGE_SHIFT; |
1224 | loff_t psize = (page->index + 1) << PAGE_SHIFT; | |
9ffe0fb5 | 1225 | unsigned offset = 0; |
39936837 | 1226 | bool need_balance_fs = false; |
eb47b800 | 1227 | int err = 0; |
458e6197 | 1228 | struct f2fs_io_info fio = { |
05ca3632 | 1229 | .sbi = sbi, |
458e6197 | 1230 | .type = DATA, |
6c311ec6 | 1231 | .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, |
05ca3632 | 1232 | .page = page, |
4375a336 | 1233 | .encrypted_page = NULL, |
458e6197 | 1234 | }; |
eb47b800 | 1235 | |
ecda0de3 CY |
1236 | trace_f2fs_writepage(page, DATA); |
1237 | ||
eb47b800 | 1238 | if (page->index < end_index) |
39936837 | 1239 | goto write; |
eb47b800 JK |
1240 | |
1241 | /* | |
1242 | * If the offset is out-of-range of file size, | |
1243 | * this page does not have to be written to disk. | |
1244 | */ | |
c1286ff4 | 1245 | offset = i_size & (PAGE_SIZE - 1); |
76f60268 | 1246 | if ((page->index >= end_index + 1) || !offset) |
39936837 | 1247 | goto out; |
eb47b800 | 1248 | |
c1286ff4 | 1249 | zero_user_segment(page, offset, PAGE_SIZE); |
39936837 | 1250 | write: |
caf0047e | 1251 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
eb47b800 | 1252 | goto redirty_out; |
1e84371f JK |
1253 | if (f2fs_is_drop_cache(inode)) |
1254 | goto out; | |
c1286ff4 JK |
1255 | /* we should not write 0'th page having journal header */ |
1256 | if (f2fs_is_volatile_file(inode) && (!page->index || | |
1257 | (!wbc->for_reclaim && | |
1258 | available_free_memory(sbi, BASE_CHECK)))) | |
1e84371f | 1259 | goto redirty_out; |
eb47b800 | 1260 | |
c1286ff4 JK | 1261 | /* we should bypass data pages to let the kworker jobs proceed */
1262 | if (unlikely(f2fs_cp_error(sbi))) { | |
1263 | mapping_set_error(page->mapping, -EIO); | |
1264 | goto out; | |
1265 | } | |
1266 | ||
39936837 | 1267 | /* Dentry blocks are controlled by checkpoint */ |
eb47b800 | 1268 | if (S_ISDIR(inode->i_mode)) { |
05ca3632 | 1269 | err = do_write_data_page(&fio); |
8618b881 JK |
1270 | goto done; |
1271 | } | |
9ffe0fb5 | 1272 | |
8618b881 | 1273 | if (!wbc->for_reclaim) |
39936837 | 1274 | need_balance_fs = true; |
c1286ff4 | 1275 | else if (has_not_enough_free_secs(sbi, 0, 0)) |
39936837 | 1276 | goto redirty_out; |
eb47b800 | 1277 | |
b3d208f9 | 1278 | err = -EAGAIN; |
8618b881 | 1279 | f2fs_lock_op(sbi); |
b3d208f9 JK |
1280 | if (f2fs_has_inline_data(inode)) |
1281 | err = f2fs_write_inline_data(inode, page); | |
1282 | if (err == -EAGAIN) | |
05ca3632 | 1283 | err = do_write_data_page(&fio); |
c1286ff4 JK |
1284 | if (F2FS_I(inode)->last_disk_size < psize) |
1285 | F2FS_I(inode)->last_disk_size = psize; | |
8618b881 JK |
1286 | f2fs_unlock_op(sbi); |
1287 | done: | |
1288 | if (err && err != -ENOENT) | |
1289 | goto redirty_out; | |
eb47b800 | 1290 | |
eb47b800 | 1291 | clear_cold_data(page); |
39936837 | 1292 | out: |
a7ffdbe2 | 1293 | inode_dec_dirty_pages(inode); |
2bca1e23 JK |
1294 | if (err) |
1295 | ClearPageUptodate(page); | |
c1286ff4 JK |
1296 | |
1297 | if (wbc->for_reclaim) { | |
1298 | f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE); | |
1299 | remove_dirty_inode(inode); | |
1300 | } | |
1301 | ||
eb47b800 | 1302 | unlock_page(page); |
c1286ff4 JK |
1303 | f2fs_balance_fs(sbi, need_balance_fs); |
1304 | ||
1305 | if (unlikely(f2fs_cp_error(sbi))) | |
2aea39ec | 1306 | f2fs_submit_merged_bio(sbi, DATA, WRITE); |
c1286ff4 | 1307 | |
eb47b800 JK |
1308 | return 0; |
1309 | ||
eb47b800 | 1310 | redirty_out: |
76f60268 | 1311 | redirty_page_for_writepage(wbc, page); |
c1286ff4 JK |
1312 | unlock_page(page); |
1313 | return err; | |
fa9150a8 NJ |
1314 | } |
1315 | ||
8f46dcae CY | 1316 | /*
1317 | * This function was copied from write_cache_pages() in mm/page-writeback.c.
1318 | * The major change is that it writes cold data pages in a separate step from
1319 | * warm/hot data pages.
1320 | */
1321 | static int f2fs_write_cache_pages(struct address_space *mapping, | |
c1286ff4 | 1322 | struct writeback_control *wbc) |
8f46dcae CY |
1323 | { |
1324 | int ret = 0; | |
1325 | int done = 0; | |
1326 | struct pagevec pvec; | |
1327 | int nr_pages; | |
1328 | pgoff_t uninitialized_var(writeback_index); | |
1329 | pgoff_t index; | |
1330 | pgoff_t end; /* Inclusive */ | |
1331 | pgoff_t done_index; | |
1332 | int cycled; | |
1333 | int range_whole = 0; | |
1334 | int tag; | |
c1286ff4 | 1335 | int nwritten = 0; |
8f46dcae CY |
1336 | |
1337 | pagevec_init(&pvec, 0); | |
c1286ff4 | 1338 | |
8f46dcae CY |
1339 | if (wbc->range_cyclic) { |
1340 | writeback_index = mapping->writeback_index; /* prev offset */ | |
1341 | index = writeback_index; | |
1342 | if (index == 0) | |
1343 | cycled = 1; | |
1344 | else | |
1345 | cycled = 0; | |
1346 | end = -1; | |
1347 | } else { | |
c1286ff4 JK |
1348 | index = wbc->range_start >> PAGE_SHIFT; |
1349 | end = wbc->range_end >> PAGE_SHIFT; | |
8f46dcae CY |
1350 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) |
1351 | range_whole = 1; | |
1352 | cycled = 1; /* ignore range_cyclic tests */ | |
1353 | } | |
1354 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) | |
1355 | tag = PAGECACHE_TAG_TOWRITE; | |
1356 | else | |
1357 | tag = PAGECACHE_TAG_DIRTY; | |
1358 | retry: | |
1359 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) | |
1360 | tag_pages_for_writeback(mapping, index, end); | |
1361 | done_index = index; | |
1362 | while (!done && (index <= end)) { | |
1363 | int i; | |
1364 | ||
1365 | nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, | |
1366 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1); | |
1367 | if (nr_pages == 0) | |
1368 | break; | |
1369 | ||
1370 | for (i = 0; i < nr_pages; i++) { | |
1371 | struct page *page = pvec.pages[i]; | |
1372 | ||
1373 | if (page->index > end) { | |
1374 | done = 1; | |
1375 | break; | |
1376 | } | |
1377 | ||
1378 | done_index = page->index; | |
1379 | ||
1380 | lock_page(page); | |
1381 | ||
1382 | if (unlikely(page->mapping != mapping)) { | |
1383 | continue_unlock: | |
1384 | unlock_page(page); | |
1385 | continue; | |
1386 | } | |
1387 | ||
1388 | if (!PageDirty(page)) { | |
1389 | /* someone wrote it for us */ | |
1390 | goto continue_unlock; | |
1391 | } | |
1392 | ||
8f46dcae CY |
1393 | if (PageWriteback(page)) { |
1394 | if (wbc->sync_mode != WB_SYNC_NONE) | |
c1286ff4 JK |
1395 | f2fs_wait_on_page_writeback(page, |
1396 | DATA, true); | |
8f46dcae CY |
1397 | else |
1398 | goto continue_unlock; | |
1399 | } | |
1400 | ||
1401 | BUG_ON(PageWriteback(page)); | |
1402 | if (!clear_page_dirty_for_io(page)) | |
1403 | goto continue_unlock; | |
1404 | ||
c1286ff4 | 1405 | ret = mapping->a_ops->writepage(page, wbc); |
8f46dcae | 1406 | if (unlikely(ret)) { |
c1286ff4 JK |
1407 | done_index = page->index + 1; |
1408 | done = 1; | |
1409 | break; | |
1410 | } else { | |
1411 | nwritten++; | |
8f46dcae CY |
1412 | } |
1413 | ||
1414 | if (--wbc->nr_to_write <= 0 && | |
1415 | wbc->sync_mode == WB_SYNC_NONE) { | |
1416 | done = 1; | |
1417 | break; | |
1418 | } | |
1419 | } | |
1420 | pagevec_release(&pvec); | |
1421 | cond_resched(); | |
1422 | } | |
1423 | ||
8f46dcae CY |
1424 | if (!cycled && !done) { |
1425 | cycled = 1; | |
1426 | index = 0; | |
1427 | end = writeback_index - 1; | |
1428 | goto retry; | |
1429 | } | |
1430 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | |
1431 | mapping->writeback_index = done_index; | |
1432 | ||
c1286ff4 JK |
1433 | if (nwritten) |
1434 | f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host, | |
1435 | NULL, 0, DATA, WRITE); | |
1436 | ||
8f46dcae CY |
1437 | return ret; |
1438 | } | |
1439 | ||
25ca923b | 1440 | static int f2fs_write_data_pages(struct address_space *mapping, |
eb47b800 JK |
1441 | struct writeback_control *wbc) |
1442 | { | |
1443 | struct inode *inode = mapping->host; | |
4081363f | 1444 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
c1286ff4 | 1445 | struct blk_plug plug; |
eb47b800 | 1446 | int ret; |
e5748434 | 1447 | |
cfb185a1 | 1448 | /* deal with chardevs and other special files */
1449 | if (!mapping->a_ops->writepage) | |
1450 | return 0; | |
1451 | ||
6a290544 CY |
1452 | /* skip writing if there is no dirty page in this inode */ |
1453 | if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) | |
1454 | return 0; | |
1455 | ||
a1257023 JK |
1456 | if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE && |
1457 | get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && | |
1458 | available_free_memory(sbi, DIRTY_DENTS)) | |
1459 | goto skip_write; | |
1460 | ||
c1286ff4 JK |
1461 | /* skip writing during file defragment */ |
1462 | if (is_inode_flag_set(inode, FI_DO_DEFRAG)) | |
1463 | goto skip_write; | |
1464 | ||
d5669f7b JK |
1465 | /* during POR, we don't need to trigger writepage at all. */ |
1466 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) | |
1467 | goto skip_write; | |
1468 | ||
c1286ff4 | 1469 | trace_f2fs_writepages(mapping->host, wbc, DATA); |
458e6197 | 1470 | |
c1286ff4 JK |
1471 | blk_start_plug(&plug); |
1472 | ret = f2fs_write_cache_pages(mapping, wbc); | |
1473 | blk_finish_plug(&plug); | |
1474 | /* | |
1475 | * if some pages were truncated, we cannot guarantee its mapping->host | |
1476 | * to detect pending bios. | |
1477 | */ | |
eb47b800 | 1478 | |
c1286ff4 | 1479 | remove_dirty_inode(inode); |
eb47b800 | 1480 | return ret; |
d3baf95d JK |
1481 | |
1482 | skip_write: | |
a7ffdbe2 | 1483 | wbc->pages_skipped += get_dirty_pages(inode); |
c1286ff4 | 1484 | trace_f2fs_writepages(mapping->host, wbc, DATA); |
d3baf95d | 1485 | return 0; |
eb47b800 JK |
1486 | } |
1487 | ||
3aab8f82 CY |
1488 | static void f2fs_write_failed(struct address_space *mapping, loff_t to) |
1489 | { | |
1490 | struct inode *inode = mapping->host; | |
c1286ff4 JK |
1491 | loff_t i_size = i_size_read(inode); |
1492 | ||
1493 | if (to > i_size) { | |
1494 | truncate_pagecache(inode, i_size); | |
1495 | truncate_blocks(inode, i_size, true); | |
1496 | } | |
1497 | } | |
1498 | ||
1499 | static int prepare_write_begin(struct f2fs_sb_info *sbi, | |
1500 | struct page *page, loff_t pos, unsigned len, | |
1501 | block_t *blk_addr, bool *node_changed) | |
1502 | { | |
1503 | struct inode *inode = page->mapping->host; | |
1504 | pgoff_t index = page->index; | |
1505 | struct dnode_of_data dn; | |
1506 | struct page *ipage; | |
1507 | bool locked = false; | |
1508 | struct extent_info ei; | |
1509 | int err = 0; | |
1510 | ||
1511 | /* | |
1512 | * we already allocated all the blocks, so we don't need to get | |
1513 | * the block addresses when there is no need to fill the page. | |
1514 | */ | |
1515 | if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE) | |
1516 | return 0; | |
3aab8f82 | 1517 | |
c1286ff4 JK |
1518 | if (f2fs_has_inline_data(inode) || |
1519 | (pos & PAGE_MASK) >= i_size_read(inode)) { | |
1520 | f2fs_lock_op(sbi); | |
1521 | locked = true; | |
3aab8f82 | 1522 | } |
c1286ff4 JK |
1523 | restart: |
1524 | /* check inline_data */ | |
1525 | ipage = get_node_page(sbi, inode->i_ino); | |
1526 | if (IS_ERR(ipage)) { | |
1527 | err = PTR_ERR(ipage); | |
1528 | goto unlock_out; | |
1529 | } | |
1530 | ||
1531 | set_new_dnode(&dn, inode, ipage, ipage, 0); | |
1532 | ||
1533 | if (f2fs_has_inline_data(inode)) { | |
1534 | if (pos + len <= MAX_INLINE_DATA) { | |
1535 | read_inline_data(page, ipage); | |
1536 | set_inode_flag(inode, FI_DATA_EXIST); | |
1537 | if (inode->i_nlink) | |
1538 | set_inline_node(ipage); | |
1539 | } else { | |
1540 | err = f2fs_convert_inline_page(&dn, page); | |
1541 | if (err) | |
1542 | goto out; | |
1543 | if (dn.data_blkaddr == NULL_ADDR) | |
1544 | err = f2fs_get_block(&dn, index); | |
1545 | } | |
1546 | } else if (locked) { | |
1547 | err = f2fs_get_block(&dn, index); | |
1548 | } else { | |
1549 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { | |
1550 | dn.data_blkaddr = ei.blk + index - ei.fofs; | |
1551 | } else { | |
1552 | /* hole case */ | |
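/*
 * The extent cache missed: look the block up through the dnode. If the
 * lookup fails or the block is still unallocated (NULL_ADDR), allocation
 * is needed, which requires f2fs_lock_op(); so drop the dnode, take the
 * lock and retry from "restart".
 */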
1553 | err = get_dnode_of_data(&dn, index, LOOKUP_NODE); | |
1554 | if (err || dn.data_blkaddr == NULL_ADDR) { | |
1555 | f2fs_put_dnode(&dn); | |
1556 | f2fs_lock_op(sbi); | |
1557 | locked = true; | |
1558 | goto restart; | |
1559 | } | |
1560 | } | |
1561 | } | |
1562 | ||
1563 | /* convert_inline_page can make node_changed */ | |
1564 | *blk_addr = dn.data_blkaddr; | |
1565 | *node_changed = dn.node_changed; | |
1566 | out: | |
1567 | f2fs_put_dnode(&dn); | |
1568 | unlock_out: | |
1569 | if (locked) | |
1570 | f2fs_unlock_op(sbi); | |
1571 | return err; | |
3aab8f82 CY |
1572 | } |
1573 | ||
eb47b800 JK |
1574 | static int f2fs_write_begin(struct file *file, struct address_space *mapping, |
1575 | loff_t pos, unsigned len, unsigned flags, | |
1576 | struct page **pagep, void **fsdata) | |
1577 | { | |
1578 | struct inode *inode = mapping->host; | |
4081363f | 1579 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
86531d6b | 1580 | struct page *page = NULL; |
c1286ff4 JK |
1581 | pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT; |
1582 | bool need_balance = false; | |
1583 | block_t blkaddr = NULL_ADDR; | |
eb47b800 JK |
1584 | int err = 0; |
1585 | ||
d854b688 MS |
1586 | if (trace_android_fs_datawrite_start_enabled()) { |
1587 | char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; | |
1588 | ||
1589 | path = android_fstrace_get_pathname(pathbuf, | |
1590 | MAX_TRACE_PATHBUF_LEN, | |
1591 | inode); | |
1592 | trace_android_fs_datawrite_start(inode, pos, len, | |
1593 | current->pid, path, | |
1594 | current->comm); | |
1595 | } | |
62aed044 CY |
1596 | trace_f2fs_write_begin(inode, pos, len, flags); |
1597 | ||
5f727395 JK |
1598 | /* |
1599 | * We should check this here to avoid a deadlock between the inode page |
1600 | * and page #0. The locking rule for inline_data conversion is: |
1601 | * lock_page(page #0) -> lock_page(inode_page) | |
1602 | */ | |
1603 | if (index != 0) { | |
1604 | err = f2fs_convert_inline_inode(inode); | |
1605 | if (err) | |
1606 | goto fail; | |
1607 | } | |
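/*
 * For index 0, inline data is handled later in prepare_write_begin(),
 * after page #0 has been locked, which preserves the lock order above.
 */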
afcb7ca0 | 1608 | repeat: |
eb47b800 | 1609 | page = grab_cache_page_write_begin(mapping, index, flags); |
3aab8f82 CY |
1610 | if (!page) { |
1611 | err = -ENOMEM; | |
1612 | goto fail; | |
1613 | } | |
d5f66990 | 1614 | |
eb47b800 JK |
1615 | *pagep = page; |
1616 | ||
c1286ff4 JK |
1617 | err = prepare_write_begin(sbi, page, pos, len, |
1618 | &blkaddr, &need_balance); | |
1619 | if (err) | |
1620 | goto fail; | |
b3d208f9 | 1621 | |
c1286ff4 JK |
1622 | if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) { |
1623 | unlock_page(page); | |
1624 | f2fs_balance_fs(sbi, true); | |
1625 | lock_page(page); | |
1626 | if (page->mapping != mapping) { | |
1627 | /* The page got truncated from under us */ | |
1628 | f2fs_put_page(page, 1); | |
1629 | goto repeat; | |
b3d208f9 | 1630 | } |
b600965c | 1631 | } |
759af1c9 | 1632 | |
c1286ff4 | 1633 | f2fs_wait_on_page_writeback(page, DATA, false); |
b3d208f9 | 1634 | |
08b39fbd CY |
1635 | /* wait for GCed encrypted page writeback */ |
1636 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) | |
c1286ff4 | 1637 | f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); |
08b39fbd | 1638 | |
c1286ff4 JK |
1639 | if (len == PAGE_SIZE || PageUptodate(page)) |
1640 | return 0; | |
eb47b800 | 1641 | |
c1286ff4 JK |
1642 | if (blkaddr == NEW_ADDR) { |
1643 | zero_user_segment(page, 0, PAGE_SIZE); | |
1644 | SetPageUptodate(page); | |
eb47b800 | 1645 | } else { |
c1286ff4 JK |
1646 | struct bio *bio; |
1647 | ||
1648 | bio = f2fs_grab_bio(inode, blkaddr, 1); | |
1649 | if (IS_ERR(bio)) { | |
1650 | err = PTR_ERR(bio); | |
9234f319 | 1651 | goto fail; |
c1286ff4 | 1652 | } |
d54c795b | 1653 | |
c1286ff4 JK |
1654 | if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { |
1655 | bio_put(bio); | |
1656 | err = -EFAULT; | |
3aab8f82 | 1657 | goto fail; |
eb47b800 | 1658 | } |
c1286ff4 JK |
1659 | |
1660 | __submit_bio(sbi, READ_SYNC, bio, DATA); | |
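/*
 * The read end_io handler unlocks the page on completion, so the
 * lock_page() below also waits for this synchronous read to finish
 * before the mapping and uptodate checks.
 */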
1661 | ||
1662 | lock_page(page); | |
6bacf52f | 1663 | if (unlikely(page->mapping != mapping)) { |
afcb7ca0 JK |
1664 | f2fs_put_page(page, 1); |
1665 | goto repeat; | |
eb47b800 | 1666 | } |
c1286ff4 JK |
1667 | if (unlikely(!PageUptodate(page))) { |
1668 | err = -EIO; | |
1669 | goto fail; | |
4375a336 | 1670 | } |
eb47b800 | 1671 | } |
eb47b800 | 1672 | return 0; |
9ba69cf9 | 1673 | |
3aab8f82 | 1674 | fail: |
86531d6b | 1675 | f2fs_put_page(page, 1); |
3aab8f82 CY |
1676 | f2fs_write_failed(mapping, pos + len); |
1677 | return err; | |
eb47b800 JK |
1678 | } |
1679 | ||
a1dd3c13 JK |
1680 | static int f2fs_write_end(struct file *file, |
1681 | struct address_space *mapping, | |
1682 | loff_t pos, unsigned len, unsigned copied, | |
1683 | struct page *page, void *fsdata) | |
1684 | { | |
1685 | struct inode *inode = page->mapping->host; | |
1686 | ||
32cbbe59 | 1687 | trace_android_fs_datawrite_end(inode, pos, len); |
dfb2bf38 CY |
1688 | trace_f2fs_write_end(inode, pos, len, copied); |
1689 | ||
c1286ff4 JK |
1690 | /* |
1691 | * This should have come from len == PAGE_SIZE, and we expect copied |
1692 | * to be PAGE_SIZE. Otherwise, treat it as zero bytes copied and |
1693 | * let generic_perform_write() retry the copy through copied = 0. |
1694 | */ | |
1695 | if (!PageUptodate(page)) { | |
1696 | if (unlikely(copied != PAGE_SIZE)) | |
1697 | copied = 0; | |
1698 | else | |
1699 | SetPageUptodate(page); | |
a1dd3c13 | 1700 | } |
c1286ff4 JK |
1701 | if (!copied) |
1702 | goto unlock_out; | |
1703 | ||
1704 | set_page_dirty(page); | |
1705 | clear_cold_data(page); | |
a1dd3c13 | 1706 | |
c1286ff4 JK |
1707 | if (pos + copied > i_size_read(inode)) |
1708 | f2fs_i_size_write(inode, pos + copied); | |
1709 | unlock_out: | |
75c3c8bc | 1710 | f2fs_put_page(page, 1); |
c1286ff4 | 1711 | f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); |
a1dd3c13 JK |
1712 | return copied; |
1713 | } | |
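/*
 * For reference, a minimal sketch (simplified, error handling omitted) of
 * how the generic buffered-write path in mm/filemap.c drives the two hooks
 * above via generic_perform_write():
 *
 *	while (iov_iter_count(i)) {
 *		a_ops->write_begin(file, mapping, pos, bytes, flags,
 *					&page, &fsdata);
 *		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *		a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 *		iov_iter_advance(i, copied);
 *		pos += copied;
 *		balance_dirty_pages_ratelimited(mapping);
 *	}
 */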
1714 | ||
6f673763 OS |
1715 | static int check_direct_IO(struct inode *inode, struct iov_iter *iter, |
1716 | loff_t offset) | |
944fcfc1 JK |
1717 | { |
1718 | unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; | |
944fcfc1 | 1719 | |
944fcfc1 JK |
1720 | if (offset & blocksize_mask) |
1721 | return -EINVAL; | |
1722 | ||
5b46f25d AV |
1723 | if (iov_iter_alignment(iter) & blocksize_mask) |
1724 | return -EINVAL; | |
1725 | ||
944fcfc1 JK |
1726 | return 0; |
1727 | } | |
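/*
 * Example: with a 4KiB block size, a direct I/O request at offset 8192
 * from a 4KiB-aligned buffer passes these checks, while an offset of 512
 * or an unaligned iovec makes f2fs_direct_IO() return -EINVAL.
 */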
1728 | ||
22c6186e | 1729 | static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
c1286ff4 | 1730 | loff_t offset) |
eb47b800 | 1731 | { |
c1286ff4 | 1732 | struct address_space *mapping = iocb->ki_filp->f_mapping; |
3aab8f82 CY |
1733 | struct inode *inode = mapping->host; |
1734 | size_t count = iov_iter_count(iter); | |
c1286ff4 | 1735 | int rw = iov_iter_rw(iter); |
3aab8f82 | 1736 | int err; |
944fcfc1 | 1737 | |
c15e8599 CY |
1738 | err = check_direct_IO(inode, iter, offset); |
1739 | if (err) | |
1740 | return err; | |
944fcfc1 | 1741 | |
c1286ff4 JK |
1742 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) |
1743 | return 0; | |
1744 | if (test_opt(F2FS_I_SB(inode), LFS)) | |
1745 | return 0; | |
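/*
 * Returning 0 here (encrypted regular files and LFS mode) makes the VFS
 * fall back to buffered I/O for the whole request.
 */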
70407fad | 1746 | |
32cbbe59 | 1747 | if (trace_android_fs_dataread_start_enabled() && |
d854b688 MS |
1748 | (iov_iter_rw(iter) == READ)) { |
1749 | char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; | |
1750 | ||
1751 | path = android_fstrace_get_pathname(pathbuf, | |
1752 | MAX_TRACE_PATHBUF_LEN, | |
1753 | inode); | |
32cbbe59 | 1754 | trace_android_fs_dataread_start(inode, offset, |
d854b688 | 1755 | count, current->pid, path, |
32cbbe59 | 1756 | current->comm); |
d854b688 | 1757 | } |
32cbbe59 | 1758 | if (trace_android_fs_datawrite_start_enabled() && |
d854b688 MS |
1759 | (iov_iter_rw(iter) == WRITE)) { |
1760 | char *path, pathbuf[MAX_TRACE_PATHBUF_LEN]; | |
32cbbe59 | 1761 | |
d854b688 MS |
1762 | path = android_fstrace_get_pathname(pathbuf, |
1763 | MAX_TRACE_PATHBUF_LEN, | |
1764 | inode); | |
1765 | trace_android_fs_datawrite_start(inode, offset, count, | |
1766 | current->pid, path, | |
1767 | current->comm); | |
1768 | } | |
c1286ff4 | 1769 | trace_f2fs_direct_IO_enter(inode, offset, count, rw); |
59b802e5 | 1770 | |
c1286ff4 | 1771 | down_read(&F2FS_I(inode)->dio_rwsem[rw]); |
e2b4e2bc | 1772 | err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio); |
c1286ff4 JK |
1773 | up_read(&F2FS_I(inode)->dio_rwsem[rw]); |
1774 | ||
1775 | if (rw == WRITE) { | |
1776 | if (err > 0) | |
1777 | set_inode_flag(inode, FI_UPDATE_WRITE); | |
1778 | else if (err < 0) | |
1779 | f2fs_write_failed(mapping, offset + count); | |
1780 | } | |
70407fad | 1781 | |
32cbbe59 MS |
1782 | if (trace_android_fs_dataread_start_enabled() && |
1783 | (iov_iter_rw(iter) == READ)) | |
1784 | trace_android_fs_dataread_end(inode, offset, count); | |
1785 | if (trace_android_fs_datawrite_start_enabled() && | |
1786 | (iov_iter_rw(iter) == WRITE)) | |
1787 | trace_android_fs_datawrite_end(inode, offset, count); | |
1788 | ||
c1286ff4 | 1789 | trace_f2fs_direct_IO_exit(inode, offset, count, rw, err); |
70407fad | 1790 | |
3aab8f82 | 1791 | return err; |
eb47b800 JK |
1792 | } |
1793 | ||
487261f3 CY |
1794 | void f2fs_invalidate_page(struct page *page, unsigned int offset, |
1795 | unsigned int length) | |
eb47b800 JK |
1796 | { |
1797 | struct inode *inode = page->mapping->host; | |
487261f3 | 1798 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
a7ffdbe2 | 1799 | |
487261f3 | 1800 | if (inode->i_ino >= F2FS_ROOT_INO(sbi) && |
c1286ff4 | 1801 | (offset % PAGE_SIZE || length != PAGE_SIZE)) |
a7ffdbe2 JK |
1802 | return; |
1803 | ||
487261f3 | 1804 | if (PageDirty(page)) { |
75bb19d8 | 1805 | if (inode->i_ino == F2FS_META_INO(sbi)) { |
487261f3 | 1806 | dec_page_count(sbi, F2FS_DIRTY_META); |
75bb19d8 | 1807 | } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { |
487261f3 | 1808 | dec_page_count(sbi, F2FS_DIRTY_NODES); |
75bb19d8 | 1809 | } else { |
487261f3 | 1810 | inode_dec_dirty_pages(inode); |
75bb19d8 CY |
1811 | remove_dirty_inode(inode); |
1812 | } | |
487261f3 | 1813 | } |
decd36b6 CY |
1814 | |
1815 | /* This is an atomic written page, keep it Private */ |
1816 | if (IS_ATOMIC_WRITTEN_PAGE(page)) | |
1817 | return; | |
1818 | ||
c1286ff4 | 1819 | set_page_private(page, 0); |
eb47b800 JK |
1820 | ClearPagePrivate(page); |
1821 | } | |
1822 | ||
487261f3 | 1823 | int f2fs_release_page(struct page *page, gfp_t wait) |
eb47b800 | 1824 | { |
f68daeeb JK |
1825 | /* If this is a dirty page, keep PagePrivate */ |
1826 | if (PageDirty(page)) | |
1827 | return 0; | |
1828 | ||
decd36b6 CY |
1829 | /* This is an atomic written page, keep it Private */ |
1830 | if (IS_ATOMIC_WRITTEN_PAGE(page)) | |
1831 | return 0; | |
1832 | ||
c1286ff4 | 1833 | set_page_private(page, 0); |
eb47b800 | 1834 | ClearPagePrivate(page); |
c3850aa1 | 1835 | return 1; |
eb47b800 JK |
1836 | } |
1837 | ||
c1286ff4 JK |
1838 | /* |
1839 | * This was copied from __set_page_dirty_buffers which gives higher performance | |
1840 | * on very high speed storage (e.g., pmem). |
1841 | */ | |
1842 | void f2fs_set_page_dirty_nobuffers(struct page *page) | |
1843 | { | |
1844 | struct address_space *mapping = page->mapping; | |
1845 | struct mem_cgroup *memcg; | |
1846 | unsigned long flags; | |
1847 | ||
1848 | if (unlikely(!mapping)) | |
1849 | return; | |
1850 | ||
1851 | spin_lock(&mapping->private_lock); | |
1852 | memcg = mem_cgroup_begin_page_stat(page); | |
1853 | SetPageDirty(page); | |
1854 | spin_unlock(&mapping->private_lock); | |
1855 | ||
1856 | spin_lock_irqsave(&mapping->tree_lock, flags); | |
1857 | WARN_ON_ONCE(!PageUptodate(page)); | |
1858 | account_page_dirtied(page, mapping, memcg); | |
1859 | radix_tree_tag_set(&mapping->page_tree, | |
1860 | page_index(page), PAGECACHE_TAG_DIRTY); | |
1861 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | |
1862 | ||
1863 | mem_cgroup_end_page_stat(memcg); | |
1864 | ||
1865 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
1866 | return; | |
1867 | } | |
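/*
 * Unlike __set_page_dirty_buffers(), no buffer heads are attached to f2fs
 * data pages, so only the page flag, the cgroup dirty accounting and the
 * radix-tree DIRTY tag need to be updated above.
 */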
1868 | ||
eb47b800 JK |
1869 | static int f2fs_set_data_page_dirty(struct page *page) |
1870 | { | |
1871 | struct address_space *mapping = page->mapping; | |
1872 | struct inode *inode = mapping->host; | |
1873 | ||
26c6b887 JK |
1874 | trace_f2fs_set_page_dirty(page, DATA); |
1875 | ||
c1286ff4 JK |
1876 | if (!PageUptodate(page)) |
1877 | SetPageUptodate(page); | |
34ba94ba | 1878 | |
1e84371f | 1879 | if (f2fs_is_atomic_file(inode)) { |
decd36b6 CY |
1880 | if (!IS_ATOMIC_WRITTEN_PAGE(page)) { |
1881 | register_inmem_page(inode, page); | |
1882 | return 1; | |
1883 | } | |
1884 | /* | |
1885 | * This page has already been registered, so just |
1886 | * return here. |
1887 | */ | |
1888 | return 0; | |
34ba94ba JK |
1889 | } |
1890 | ||
eb47b800 | 1891 | if (!PageDirty(page)) { |
c1286ff4 | 1892 | f2fs_set_page_dirty_nobuffers(page); |
a7ffdbe2 | 1893 | update_dirty_page(inode, page); |
eb47b800 JK |
1894 | return 1; |
1895 | } | |
1896 | return 0; | |
1897 | } | |
1898 | ||
c01e54b7 JK |
1899 | static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) |
1900 | { | |
454ae7e5 CY |
1901 | struct inode *inode = mapping->host; |
1902 | ||
1d373a0e JK |
1903 | if (f2fs_has_inline_data(inode)) |
1904 | return 0; | |
1905 | ||
1906 | /* make sure allocating whole blocks */ | |
1907 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) | |
1908 | filemap_write_and_wait(mapping); | |
1909 | ||
e2b4e2bc | 1910 | return generic_block_bmap(mapping, block, get_data_block_bmap); |
429511cd CY |
1911 | } |
1912 | ||
c1286ff4 JK |
1913 | #ifdef CONFIG_MIGRATION |
1914 | #include <linux/migrate.h> | |
1915 | ||
1916 | int f2fs_migrate_page(struct address_space *mapping, | |
1917 | struct page *newpage, struct page *page, enum migrate_mode mode) | |
1918 | { | |
1919 | int rc, extra_count; | |
1920 | struct f2fs_inode_info *fi = F2FS_I(mapping->host); | |
1921 | bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page); | |
1922 | ||
1923 | BUG_ON(PageWriteback(page)); | |
1924 | ||
1925 | /* migrating an atomic written page is safe with the inmem_lock held */ |
1926 | if (atomic_written && !mutex_trylock(&fi->inmem_lock)) | |
1927 | return -EAGAIN; | |
1928 | ||
1929 | /* | |
1930 | * A reference is expected if PagePrivate is set when moving the mapping; |
1931 | * however, F2FS breaks this rule to maintain dirty page counts when |
1932 | * truncating pages, so 'extra_count' is adjusted here to make it work. |
1933 | */ | |
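/*
 * Concretely: an atomic written page carries PagePrivate plus one extra
 * reference, so extra_count is 1 - 1 = 0; an ordinary private page gives
 * 0 - 1 = -1, telling migrate_page_move_mapping() to expect one reference
 * fewer than it normally would for a page with PagePrivate.
 */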
1934 | extra_count = (atomic_written ? 1 : 0) - page_has_private(page); | |
1935 | rc = migrate_page_move_mapping(mapping, newpage, | |
1936 | page, NULL, mode, extra_count); | |
1937 | if (rc != MIGRATEPAGE_SUCCESS) { | |
1938 | if (atomic_written) | |
1939 | mutex_unlock(&fi->inmem_lock); | |
1940 | return rc; | |
1941 | } | |
1942 | ||
1943 | if (atomic_written) { | |
1944 | struct inmem_pages *cur; | |
1945 | list_for_each_entry(cur, &fi->inmem_pages, list) | |
1946 | if (cur->page == page) { | |
1947 | cur->page = newpage; | |
1948 | break; | |
1949 | } | |
1950 | mutex_unlock(&fi->inmem_lock); | |
1951 | put_page(page); | |
1952 | get_page(newpage); | |
1953 | } | |
1954 | ||
1955 | if (PagePrivate(page)) | |
1956 | SetPagePrivate(newpage); | |
1957 | set_page_private(newpage, page_private(page)); | |
1958 | ||
1959 | migrate_page_copy(newpage, page); | |
1960 | ||
1961 | return MIGRATEPAGE_SUCCESS; | |
1962 | } | |
1963 | #endif | |
1964 | ||
eb47b800 JK |
1965 | const struct address_space_operations f2fs_dblock_aops = { |
1966 | .readpage = f2fs_read_data_page, | |
1967 | .readpages = f2fs_read_data_pages, | |
1968 | .writepage = f2fs_write_data_page, | |
1969 | .writepages = f2fs_write_data_pages, | |
1970 | .write_begin = f2fs_write_begin, | |
a1dd3c13 | 1971 | .write_end = f2fs_write_end, |
eb47b800 | 1972 | .set_page_dirty = f2fs_set_data_page_dirty, |
487261f3 CY |
1973 | .invalidatepage = f2fs_invalidate_page, |
1974 | .releasepage = f2fs_release_page, | |
eb47b800 | 1975 | .direct_IO = f2fs_direct_IO, |
c01e54b7 | 1976 | .bmap = f2fs_bmap, |
c1286ff4 JK |
1977 | #ifdef CONFIG_MIGRATION |
1978 | .migratepage = f2fs_migrate_page, | |
1979 | #endif | |
eb47b800 | 1980 | }; |