/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

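/*
 * Completion handler for read bios.  For encrypted inodes the pages are
 * handed to the crypto workqueue for decryption; otherwise each page is
 * marked up-to-date (or failed) and unlocked for its waiting reader.
 */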
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (err) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

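/*
 * Completion handler for write bios.  On error, the pages are re-dirtied
 * and checkpointing is stopped; in every case writeback is ended and the
 * checkpoint waiter is woken once no writeback pages remain.
 */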
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

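/* Submit the bio currently merged in @io, if any, and reset it. */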
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

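/*
 * Merge the given page into the per-type bio when it is contiguous with
 * the last submitted block and uses the same rw flags; otherwise submit
 * the pending bio first and start a new one.
 */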
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

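/*
 * Look up the dnode for @index and reserve a new block there if none is
 * allocated yet.  The dnode is put on error, or when the caller did not
 * supply its own locked inode page.
 */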
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

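/* rb-tree helpers: link a new extent node into a tree, or unlink and uncache one */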
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

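/*
 * Find the extent node covering @fofs: try the cached node first, then
 * fall back to a binary search of the rb-tree.  Returns NULL on miss.
 */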
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}

static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}

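/*
 * Insert @ei into the extent tree, merging with the front or back
 * neighbor when they are contiguous.  A node detached by merging is
 * returned via @den so the caller can drop it from the LRU list and
 * free it.
 */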
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				goto update_out;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				goto update_out;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;
update_out:
	if (en->ei.len > et->largest.len)
		et->largest = en->ei;
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

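/* Invalidate the cached largest extent if @fofs falls inside it. */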
static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
		largest->len = 0;
}

void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode))
		return;

	et = __grab_extent_tree(inode);

	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	write_lock(&et->lock);
	if (et->count)
		goto out;

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}

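/*
 * Look up @pgofs in the in-memory extent tree.  The cached largest
 * extent is checked first; on a hit from the rb-tree, the node is moved
 * to the tail of the global LRU list and remembered in cached_en.
 */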
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_read_hit(sbi->sb);
		goto out;
	}

	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
		spin_unlock(&sbi->extent_lock);
		ret = true;
		stat_inc_read_hit(sbi->sb);
	}
out:
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

/* return true if the on-disk extent should be updated */
static bool f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei, prev;
	unsigned int endofs;

	if (!et)
		return false;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	write_lock(&et->lock);

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return false;
	}

	prev = et->largest;
	dei.len = 0;

	/* we do not guarantee that the largest extent is cached all the time */
	__drop_largest_extent(inode, fofs);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk + 1, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
		}
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 are split from en; they will become smaller and
	 * smaller fragments after splitting several times. So if the length
	 * is smaller than F2FS_MIN_EXTENT_LEN, we will not add them into
	 * the extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		__free_extent_tree(sbi, et, true);

	write_unlock(&et->lock);

	return !__is_extent_same(&prev, &et->largest);
}

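/*
 * Shrinker entry: first drop whole extent trees that are no longer
 * referenced by any inode, then reclaim up to @nr_shrink LRU extent
 * nodes from the remaining trees.
 */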
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_root *root = &sbi->extent_tree_root;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			if (!atomic_read(&et->refcount)) {
				write_lock(&et->lock);
				node_cnt += __free_extent_tree(sbi, et, true);
				write_unlock(&et->lock);

				radix_tree_delete(root, et->ino);
				kmem_cache_free(extent_tree_slab, et);
				sbi->total_ext_tree--;
				tree_cnt++;

				if (node_cnt + tree_cnt >= nr_shrink)
					goto unlock_out;
			}
		}
	}
	up_write(&sbi->extent_tree_lock);

	/* 2. remove LRU extent entries */
	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!remained--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);

			if (node_cnt + tree_cnt >= nr_shrink)
				break;
		}
	}
unlock_out:
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

736 | ||
3e72f721 | 737 | unsigned int f2fs_destroy_extent_node(struct inode *inode) |
429511cd CY |
738 | { |
739 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
3e72f721 | 740 | struct extent_tree *et = F2FS_I(inode)->extent_tree; |
1ec4610c | 741 | unsigned int node_cnt = 0; |
429511cd | 742 | |
93dfc526 | 743 | if (!et) |
3e72f721 | 744 | return 0; |
429511cd | 745 | |
429511cd | 746 | write_lock(&et->lock); |
1ec4610c | 747 | node_cnt = __free_extent_tree(sbi, et, true); |
429511cd CY |
748 | write_unlock(&et->lock); |
749 | ||
3e72f721 JK |
750 | return node_cnt; |
751 | } | |
429511cd | 752 | |
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
		atomic_dec(&et->refcount);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	atomic_dec(&et->refcount);
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
	return;
}

static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (f2fs_update_extent_tree(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

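/*
 * Return the page holding the data block at @index, issuing a read bio
 * when the cached copy is not up-to-date; in that case the page comes
 * back locked until read completion.  Encrypted regular files are read
 * through read_mapping_page() so their pages get decrypted.
 */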
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		return ERR_PTR(err);
	return page;
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether the page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

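/*
 * Allocate a new data block at the current dnode position: account for
 * the block, allocate it from a data log, store the new address in the
 * node page, and extend i_size when the block grows the file.
 */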
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	__drop_largest_extent(dn->inode, fofs);

	return 0;
}

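/*
 * Preallocate blocks for a direct write of @count bytes at @offset,
 * walking the dnodes and allocating every NULL_ADDR/NEW_ADDR slot in the
 * range under f2fs_lock_op() coverage.
 */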
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, bool fiemap)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
		if (dn.data_blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	map->m_len = 1;
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, bool fiemap)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, fiemap);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

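/*
 * fiemap implementation: walks the file with get_data_block_fiemap() and
 * emits one extent per contiguous mapped range, marking the last extent
 * before EOF with FIEMAP_EXTENT_LAST.
 */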
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

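/*
 * Write one data page: in-place update (IPU) when SSR allocation prefers
 * rewriting the existing block, otherwise out-of-place write (OPU) to a
 * newly allocated block, updating the extent cache to match.
 */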
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

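/* Trampoline for write_cache_pages(): call ->writepage and record errors. */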
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

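/*
 * ->writepages entry point: skip lightly dirtied directories when
 * memory allows, serialize regular writeback through sbi->writepages,
 * and submit the merged DATA bio once the pass is done.
 */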
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

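/* Undo a failed or short write: trim page cache and blocks beyond i_size. */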
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

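/*
 * ->write_begin entry point: reserve a block for the page (converting
 * inline data when the write can no longer stay inline), then bring the
 * page up to date by zeroing it or reading it from disk.
 */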
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this here to avoid a deadlock between the inode
	 * page and page #0. The locking rule for inline_data conversion is:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				goto fail;
			}
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

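/*
 * ->write_end entry point: dirty the page and push i_size forward if
 * the copied data extended the file.
 */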
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

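/*
 * Direct writes must be block-aligned in both file offset and user
 * memory; a misaligned request makes f2fs_direct_IO() return 0 so the
 * caller falls back to buffered I/O. Reads are not checked here.
 */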
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

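/*
 * ->direct_IO entry point: convert inline data away, preallocate
 * blocks for writes, then hand the request to blockdev_direct_IO()
 * with get_data_block() as the block mapper.
 */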
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to keep inline_data strictly; convert it away */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	if (check_direct_IO(inode, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

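/*
 * ->invalidatepage entry point: when a whole page is purged, drop its
 * dirty accounting and private flag; partial invalidation of regular
 * pages is ignored.
 */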
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

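/* ->releasepage entry point: only clean pages may give up PagePrivate. */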
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

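/*
 * ->set_page_dirty entry point: pages of an atomic-write file are
 * queued in memory via register_inmem_page() instead of being dirtied
 * in the mapping.
 */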
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

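/* ->bmap entry point: inline data has no block address, so convert it first. */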
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to keep inline_data strictly; convert it away */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

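/*
 * Extent cache bootstrap: init_extent_cache_info() sets up the per-sb
 * state, while create_extent_cache()/destroy_extent_cache() manage the
 * global slab caches for extent trees and nodes.
 */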
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};