/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1301, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

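/*
 * Defrag records are thus totally ordered by (root objectid, inode
 * number), so all records for one subvolume sort together, e.g.
 * (root 5, ino 260) < (root 5, ino 261) < (root 257, ino 260).
 * (The example ids above are illustrative, not taken from real trees.)
 */
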
/* put a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

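/*
 * Note the ownership rule implied by the -EEXIST return above: on
 * failure the defrag struct passed in is NOT inserted into the tree,
 * so the caller is responsible for freeing it.  Both callers below
 * (btrfs_add_inode_defrag and btrfs_requeue_inode_defrag) do exactly
 * that.
 */
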
static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, the new in-memory
		 * inode doesn't have IN_DEFRAG set.  In that case we may
		 * still find an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to
	 * merge them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

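/*
 * When no exact (root, ino) match exists, the record returned above is
 * the next one in tree order (via rb_next), and the record is always
 * erased from the tree before it is handed back: the caller owns it and
 * either requeues it or frees it.  btrfs_run_defrag_inodes() below
 * relies on this by asking for defrag->ino + 1 each iteration, which
 * walks every queued inode exactly once.
 */
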
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

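/*
 * BTRFS_DEFRAG_BATCH bounds how many extents btrfs_defrag_file() will
 * process per pass below.  A completely filled batch is taken as a hint
 * that the file has more work left, so the record is requeued instead
 * of being freed.
 */
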
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}
	if (btrfs_root_refs(&inode_root->root_item) == 0) {
		ret = -ENOENT;
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

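/*
 * A zero return from the atomic copy above means a page fault could not
 * be serviced with the pages locked.  The caller in
 * __btrfs_buffered_write() reacts by shrinking its batch to a single
 * page (nrptrs = 1) and forcing that page uptodate before retrying,
 * which guarantees forward progress.
 */
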
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->ram_bytes = em->ram_bytes;
			split->orig_block_len = max(split->block_len,
						    em->orig_block_len);
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;
			split->orig_block_len = max(em->block_len,
						    em->orig_block_len);
			split->ram_bytes = em->ram_bytes;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = em->orig_start;
			}

			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

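/*
 * In btrfs_drop_extent_cache() above, dropping [start, end] out of an
 * existing mapping can leave up to two pieces behind, roughly:
 *
 *	|--------- em ---------|
 *	      | drop range |
 *	|spl1 |            |spl2|
 *
 * split and split2 are preallocated for exactly those two leftover
 * pieces, which is why a failed allocation (no_splits) skips the split
 * logic entirely.
 */
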
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

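/*
 * extent_mergeable() above only says yes for plain
 * BTRFS_FILE_EXTENT_REG items that point at the same disk extent
 * (bytenr), whose file offset is consistent with the same original
 * allocation (key.offset - orig_offset), and which use no compression,
 * encryption or other encoding.  Anything else is left alone, which
 * keeps the merge logic below conservative.
 */
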
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

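/*
 * Only pages that may be partially overwritten need to be read first:
 * an interior page of the write range is completely rewritten by the
 * copy, so prepare_pages() below calls prepare_uptodate_page() for just
 * the first and last page, and prepare_uptodate_page() itself skips the
 * read when the boundary is page aligned and force_uptodate is not set.
 */
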
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file_inode(file);
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding
		 * extent for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}

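/*
 * O_DIRECT writes that only partially succeed are finished with the
 * buffered path below: the unwritten tail is pushed through
 * __btrfs_buffered_write(), flushed with
 * filemap_write_and_wait_range(), and the affected page cache range is
 * invalidated so later direct reads don't see stale cached pages.
 */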
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

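/*
 * Entry point for aio and synchronous writes.  After the usual checks
 * (segment validation, suid removal, read-only FS state) this either
 * takes the O_DIRECT path above or feeds page batches through
 * __btrfs_buffered_write(); generic_write_sync() at the end provides
 * O_SYNC/O_DSYNC semantics.
 */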
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will enospc out there.  We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We need to block on a committing transaction to keep us from
		 * throwing an ordered operation on to the list and causing
		 * something like sync to deadlock trying to flush out this
		 * inode.
		 */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

d352ac68
CM
1646/*
1647 * fsync call for both files and directories. This logs the inode into
1648 * the tree log instead of forcing full commits whenever possible.
1649 *
1650 * It needs to call filemap_fdatawait so that all ordered extent updates are
1651 * in the metadata btree are up to date for copying to the log.
1652 *
1653 * It drops the inode mutex before doing the tree log commit. This is an
1654 * important optimization for directories because holding the mutex prevents
1655 * new operations on the dir while we write to disk.
1656 */
02c24a82 1657int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
39279cc3 1658{
7ea80859 1659 struct dentry *dentry = file->f_path.dentry;
39279cc3
CM
1660 struct inode *inode = dentry->d_inode;
1661 struct btrfs_root *root = BTRFS_I(inode)->root;
15ee9bc7 1662 int ret = 0;
39279cc3 1663 struct btrfs_trans_handle *trans;
2ab28f32 1664 bool full_sync = 0;
39279cc3 1665
1abe9b8a 1666 trace_btrfs_sync_file(file, datasync);
257c62e1 1667
90abccf2
MX
1668 /*
1669 * We write the dirty pages in the range and wait until they complete
1670 * out of the ->i_mutex. If so, we can flush the dirty pages by
2ab28f32
JB
1671 * multi-task, and make the performance up. See
1672 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
90abccf2 1673 */
b812ce28 1674 atomic_inc(&BTRFS_I(inode)->sync_writers);
2ab28f32
JB
1675 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1676 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1677 &BTRFS_I(inode)->runtime_flags))
1678 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
b812ce28 1679 atomic_dec(&BTRFS_I(inode)->sync_writers);
90abccf2
MX
1680 if (ret)
1681 return ret;
1682
02c24a82
JB
1683 mutex_lock(&inode->i_mutex);
1684
0885ef5b 1685 /*
90abccf2
MX
1686 * We flush the dirty pages again to avoid some dirty pages in the
1687 * range being left.
0885ef5b 1688 */
2ecb7923 1689 atomic_inc(&root->log_batch);
2ab28f32
JB
1690 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1691 &BTRFS_I(inode)->runtime_flags);
1692 if (full_sync)
1693 btrfs_wait_ordered_range(inode, start, end - start + 1);
2ecb7923 1694 atomic_inc(&root->log_batch);
257c62e1 1695
	/*
	 * If the last transaction that changed this file was before the
	 * current transaction and we have the full sync flag set in our
	 * inode, we can bail out now without any syncing.
	 *
	 * Note that we can't bail out if the full sync flag isn't set. This is
	 * because when the full sync flag is set we start all ordered extents
	 * and wait for them to fully complete - when they complete they update
	 * the inode's last_trans field through:
	 *
	 *     btrfs_finish_ordered_io() ->
	 *         btrfs_update_inode_fallback() ->
	 *             btrfs_update_inode() ->
	 *                 btrfs_set_inode_last_trans()
	 *
	 * So we are sure that last_trans is up to date and can do this check to
	 * bail out safely. For the fast path, when the full sync flag is not
	 * set in our inode, we cannot do it because we start only our ordered
	 * extents and don't wait for them to complete (that is when
	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
	 * value might be less than or equal to fs_info->last_trans_committed,
	 * and setting a speculative last_trans for an inode when a buffered
	 * write is made (such as fs_info->generation + 1 for example) would not
	 * be reliable since after setting the value and before fsync is called
	 * any number of transactions can start and commit (the transaction
	 * kthread commits the current transaction periodically), and a
	 * transaction commit neither starts nor waits for ordered extents to
	 * complete.
	 */
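	/*
	 * Full barrier so the in-log and last_trans checks below see
	 * up-to-date values set by other tasks (see the comment above for why
	 * those values can be trusted).
	 */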
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    (full_sync && BTRFS_I(inode)->last_trans <=
	     root->fs_info->last_trans_committed)) {
		/*
		 * We've had everything committed since the last time we were
		 * modified, so clear this flag in case it was set for whatever
		 * reason; it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * OK, we haven't committed the transaction yet; let's do a commit.
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log.  It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			/*
			 * If we didn't already wait for ordered extents we
			 * need to do that now.
			 */
			if (!full_sync)
				btrfs_wait_ordered_range(inode, start,
							 end - start + 1);
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0) {
				ret = btrfs_end_transaction(trans, root);
			} else {
				if (!full_sync)
					btrfs_wait_ordered_range(inode, start,
								 end - start + 1);
				ret = btrfs_commit_transaction(trans, root);
			}
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

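/*
 * Check whether the file extent item at @slot is a hole (a regular extent
 * with a zero disk bytenr) belonging to this inode that either ends at
 * @start or begins at @end, i.e. one that a new hole can be merged into.
 */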
static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}

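/*
 * After __btrfs_drop_extents() has removed the file extents in
 * [offset, end), stitch a hole back into the file: extend a neighbouring
 * hole extent item if possible, otherwise insert a new zero-bytenr extent.
 */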
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

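	/*
	 * First try to merge with the hole extent just before or just after
	 * the dropped range, so we grow an existing item instead of
	 * inserting a new one.
	 */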
	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			    end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			    offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

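	/*
	 * Keep the in-memory extent map cache consistent with what we just
	 * did on disk: insert an explicit hole mapping, or, if we can't
	 * allocate one, drop the cached range and force a full sync on the
	 * next fsync of this inode.
	 */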
	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}

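/*
 * Punch a hole into [offset, offset + len): zero the partial pages at both
 * ends, then drop the whole blocks in between and replace them with an
 * explicit hole extent.
 */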
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
	u64 lockend = round_down(offset + len,
				 BTRFS_I(inode)->root->sectorsize) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));

	btrfs_wait_ordered_range(inode, offset, len);

	mutex_lock(&inode->i_mutex);
	/*
	 * We needn't truncate any page which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
			ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset, 0, 0);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	/* zero the front end of the last page */
	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range and that nobody raced in and read a page in this
		 * range; if they did, we need to try again.
		 */
		if ((!ordered ||
		     (ordered->file_offset + ordered->len < lockstart ||
		      ordered->file_offset > lockend)) &&
		    !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				    lockend, EXTENT_UPTODATE, 0,
				    cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

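	/*
	 * Drop the extents in chunks: __btrfs_drop_extents() returns -ENOSPC
	 * once our fail-fast reservation runs low, at which point we fill the
	 * hole for what was dropped so far, update the inode, and restart
	 * with a fresh transaction and a refilled reservation.
	 */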
	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root);

		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}

static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	struct extent_map *em;
	int blocksize = BTRFS_I(inode)->root->sectorsize;
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);

	/* Make sure we aren't being given some crap mode. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		return ret;
	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
		if (ret)
			goto out_reserve_fail;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/*
		 * the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

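	/*
	 * Walk the range one extent map at a time: preallocate over holes and
	 * over anything past i_size that isn't already preallocated; existing
	 * extents only need an i_size update when the file grew.
	 */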
	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	if (root->fs_info->quota_enabled)
		btrfs_qgroup_free(root, alloc_end - alloc_start);
out_reserve_fail:
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
	return ret;
}

static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	lockend--;
	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (whence == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

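	/*
	 * Walk forward one extent map at a time until we hit a hole
	 * (SEEK_HOLE) or real data (SEEK_DATA), bailing out with -ENXIO once
	 * we run past the last mapped range.
	 */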
	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (whence == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (whence == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				if (!test_bit(EXTENT_FLAG_PREALLOC,
					      &em->flags)) {
					*offset = start;
					free_extent_map(em);
					break;
				}
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

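	/*
	 * Validate the resulting offset the same way generic_file_llseek
	 * would: no negative offsets (unless the file allows them) and
	 * nothing past the filesystem's maximum file size.
	 */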
	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};

void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}